sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _adjust_router_list_for_global_router(self, routers):
    """Push 'Global' routers to the end of the router list.

    This ensures that deleting the default route occurs before deletion
    of the external network sub-interface.

    :param routers: list of router dicts; reordered in place
    :return: None
    """
    # ToDo(Hareesh): Simplify if possible
    # BUG FIX: iterate over a snapshot of the list. The original removed
    # and re-appended elements of `routers` while iterating it directly,
    # which makes the iterator skip the element following each moved
    # router and visit the moved router a second time.
    for r in list(routers):
        if r[ROUTER_ROLE_ATTR] == c_constants.ROUTER_ROLE_GLOBAL:
            LOG.debug("Global router:%s found. Moved to the end of list "
                      "for processing", r['id'])
            routers.remove(r)
            routers.append(r)
deleting default route occurs before deletion of external nw subintf | entailment |
def _process_routers(self, routers, removed_routers,
                     device_id=None, all_routers=False):
    """Process the set of routers.

    Iterating on the set of routers received and comparing it with the
    set of routers already in the routing service helper, new routers
    which are added are identified. Before processing check the
    reachability (via ping) of hosting device where the router is hosted.
    If device is not reachable it is backlogged.

    For routers which are only updated, call `_process_router()` on them.

    When all_routers is set to True (because of a full sync),
    this will result in the detection and deletion of routers which
    have been removed.

    Whether the router can only be assigned to a particular hosting device
    is decided and enforced by the plugin. No checks are done here.

    :param routers: The set of routers to be processed
    :param removed_routers: the set of routers which were removed
    :param device_id: Id of the hosting device
    :param all_routers: Flag for specifying a partial list of routers
    :return: None
    """
    try:
        # Ids of routers we already track that are hosted on this device.
        ids_previously_hosted_routers = (
            set(r_id for r_id, rdata in self.router_info.items()
                if rdata.router.get('hosting_device',
                                    {}).get('id') == device_id))
        if all_routers:
            prev_router_ids = ids_previously_hosted_routers
        else:
            # Partial sync: only consider previously hosted routers that
            # are also present in this update.
            prev_router_ids = (ids_previously_hosted_routers &
                               set([router['id'] for router in routers]))
        cur_router_ids = set()
        deleted_routerids_list = []
        # Only admin-up routers count as currently present; admin-down
        # routers fall through to the deletion detection below.
        for r in routers:
            if not r['admin_state_up']:
                continue
            cur_router_ids.add(r['id'])
        # identify list of routers(ids) that no longer exist
        for router_id in prev_router_ids - cur_router_ids:
            deleted_routerids_list.append(router_id)
        if removed_routers:
            self._adjust_router_list_for_global_router(removed_routers)
            for router in removed_routers:
                deleted_routerids_list.append(router['id'])
        # Global routers are processed last so that default route removal
        # happens before external network sub-interface deletion.
        self._adjust_router_list_for_global_router(routers)
        # First process create/updated routers
        for r in routers:
            LOG.debug("Processing router[id:%(id)s, role:%(role)s]",
                      {'id': r['id'], 'role': r[ROUTER_ROLE_ATTR]})
            if r['id'] in deleted_routerids_list:
                continue
            if r['status'] == c_constants.ROUTER_INFO_INCOMPLETE:
                # The plugin could not fill in all the info due to
                # timing and db settling down. So put this router
                # back in updated_routers, we will pull again on the
                # sync time.
                LOG.debug("Router: %(id)s INFO_INCOMPLETE",
                          {'id': r['id']})
                self._update_updated_routers_cache([r['id']])
                continue
            try:
                if not r['admin_state_up']:
                    continue
                cur_router_ids.add(r['id'])
                hd = r['hosting_device']
                # Skip (do not delete) routers on unreachable devices.
                if not self._dev_status.is_hosting_device_reachable(hd):
                    LOG.info("Router: %(id)s is on an unreachable "
                             "hosting device. ", {'id': r['id']})
                    continue
                if r['id'] not in self.router_info:
                    self._router_added(r['id'], r)
                ri = self.router_info[r['id']]
                ri.router = r
                self._process_router(ri)
            except ncc_errors.SessionCloseError as e:
                LOG.exception(
                    "ncclient Unexpected session close %s", e)
                if not self._dev_status.is_hosting_device_reachable(
                        r['hosting_device']):
                    LOG.debug("Lost connectivity to hosting device %s" %
                              r['hosting_device']['id'])
                    # Will rely on heartbeat to detect hd state
                    # and schedule resync when hd comes back
                else:
                    # retry the router update on the next pass
                    self._update_updated_routers_cache([r['id']])
                    LOG.debug("RETRY_RTR_UPDATE %s" % (r['id']))
                continue
            except KeyError as e:
                LOG.exception("Key Error, missing key: %s", e)
                self._update_updated_routers_cache([r['id']])
                continue
            except cfg_exceptions.DriverException as e:
                LOG.exception("Driver Exception on router:%(id)s. "
                              "Error is %(e)s", {'id': r['id'],
                                                 'e': e})
                self._update_updated_routers_cache([r['id']])
                continue
            LOG.debug("Done processing router[id:%(id)s, role:%(role)s]",
                      {'id': r['id'], 'role': r[ROUTER_ROLE_ATTR]})
        # Finally process removed routers
        for router_id in deleted_routerids_list:
            LOG.debug("Processing deleted router:%s", router_id)
            self._router_removed(router_id)
    except Exception:
        # Any unexpected failure schedules a full resync of this device.
        LOG.exception("Exception in processing routers on device:%s",
                      device_id)
        self.sync_devices.add(device_id)
Iterating on the set of routers received and comparing it with the
set of routers already in the routing service helper, new routers
which are added are identified. Before processing check the
reachability (via ping) of hosting device where the router is hosted.
If device is not reachable it is backlogged.
For routers which are only updated, call `_process_router()` on them.
When all_routers is set to True (because of a full sync),
this will result in the detection and deletion of routers which
have been removed.
Whether the router can only be assigned to a particular hosting device
is decided and enforced by the plugin. No checks are done here.
:param routers: The set of routers to be processed
:param removed_routers: the set of routers which where removed
:param device_id: Id of the hosting device
:param all_routers: Flag for specifying a partial list of routers
:return: None | entailment |
def _send_update_port_statuses(self, port_ids, status):
    """Send update notifications for the operational status of ports.

    To make sure each notification doesn't exceed the RPC length, each
    message contains a maximum of MAX_PORTS_IN_BATCH port ids.

    :param port_ids: List of ports to update the status
    :param status: operational status to update
                   (ex: bc.constants.PORT_STATUS_ACTIVE)
    :return: None
    """
    if not port_ids:
        return
    MAX_PORTS_IN_BATCH = 50
    # The builtin range() behaves identically to six.moves.range() for
    # generating the chunk start offsets, so six is not needed here.
    list_chunks_ports = [port_ids[i:i + MAX_PORTS_IN_BATCH]
                         for i in range(0, len(port_ids),
                                        MAX_PORTS_IN_BATCH)]
    for chunk_ports in list_chunks_ports:
        self.plugin_rpc.send_update_port_statuses(self.context,
                                                  chunk_ports, status)
list of router ports provided. To make each notification doesn't exceed
the RPC length, each message contains a maximum of MAX_PORTS_IN_BATCH
port ids.
:param port_ids: List of ports to update the status
:param status: operational status to update
(ex: bc.constants.PORT_STATUS_ACTIVE) | entailment |
def _process_router(self, ri):
    """Process a router, apply latest configuration and update router_info.

    Get the router dict from RouterInfo and proceed to detect changes
    from the last known state. When new ports or deleted ports are
    detected, `internal_network_added()` or `internal_networks_removed()`
    are called accordingly. Similarly changes in ex_gw_port causes
    `external_gateway_added()` or `external_gateway_removed()` calls.
    Next, floating_ips and routes are processed. Also, latest state is
    stored in ri.internal_ports and ri.ex_gw_port for future comparisons.

    :param ri: RouterInfo object of the router being processed.
    :return: None
    :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        DriverException if the configuration operation fails.
    """
    try:
        ex_gw_port = ri.router.get('gw_port')
        ri.ha_info = ri.router.get('ha_info', None)
        # Gateway transitions relative to the last processed state.
        gateway_set = ex_gw_port and not ri.ex_gw_port
        gateway_cleared = not ex_gw_port and ri.ex_gw_port
        internal_ports = ri.router.get(bc.constants.INTERFACE_KEY, [])
        # Once the gateway is set, then we know which VRF this router
        # belongs to. Keep track of it in our lists of routers, organized
        # as a dictionary by VRF name
        if gateway_set:
            self._add_rid_to_vrf_list(ri)
        new_ports, old_ports, change_details = (
            self._get_internal_port_changes(ri, internal_ports))
        list_port_ids_up = []
        # Tenant (and HA redundancy) routers take the regular port
        # processing path; other roles take the global-router path.
        non_global_router_roles = [None,
                                   c_constants.ROUTER_ROLE_HA_REDUNDANCY]
        if ri.router[ROUTER_ROLE_ATTR] in non_global_router_roles:
            self._process_new_ports(ri, new_ports, ex_gw_port,
                                    list_port_ids_up, change_details)
            self._process_old_ports(ri, old_ports, ex_gw_port,
                                    change_details)
        else:
            self._process_new_ports_global(ri, new_ports, ex_gw_port,
                                           list_port_ids_up)
            self._process_old_ports_global(ri, old_ports, ex_gw_port)
        if gateway_set:
            self._process_gateway_set(ri, ex_gw_port,
                                      list_port_ids_up)
        elif gateway_cleared:
            self._process_gateway_cleared(ri, ri.ex_gw_port)
        # Report the ports brought up during this pass to the plugin.
        self._send_update_port_statuses(list_port_ids_up,
                                        bc.constants.PORT_STATUS_ACTIVE)
        if ex_gw_port:
            self._process_router_floating_ips(ri, ex_gw_port)
        global_router_roles = [c_constants.ROUTER_ROLE_GLOBAL,
                               c_constants.ROUTER_ROLE_LOGICAL_GLOBAL]
        if ri.router[ROUTER_ROLE_ATTR] not in global_router_roles:
            self._enable_disable_ports(ri, ex_gw_port, internal_ports)
        if gateway_cleared:
            # Remove this router from the list of routers by VRF
            self._remove_rid_from_vrf_list(ri)
        # Persist the latest gateway state for the next comparison.
        ri.ex_gw_port = ex_gw_port
        self._routes_updated(ri)
    except cfg_exceptions.HAParamsMissingException as e:
        # HA info incomplete: retry this router on the next pass.
        self._update_updated_routers_cache([ri.router_id])
        LOG.warning(e)
    except cfg_exceptions.DriverException as e:
        # Re-raise for the caller, but make sure the router is retried.
        with excutils.save_and_reraise_exception():
            self._update_updated_routers_cache([ri.router_id])
            LOG.error(e)
Get the router dict from RouterInfo and proceed to detect changes
from the last known state. When new ports or deleted ports are
detected, `internal_network_added()` or `internal_networks_removed()`
are called accordingly. Similarly changes in ex_gw_port causes
`external_gateway_added()` or `external_gateway_removed()` calls.
Next, floating_ips and routes are processed. Also, latest state is
stored in ri.internal_ports and ri.ex_gw_port for future comparisons.
:param ri : RouterInfo object of the router being processed.
:return:None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails. | entailment |
def _process_router_floating_ips(self, ri, ex_gw_port):
    """Process a router's floating ips.

    Compare floatingips configured in device (i.e., those fips in
    the ri.floating_ips "cache") with the router's updated floating ips
    (in ri.router.floating_ips) and determine floating_ips which were
    added or removed. Notify driver of the change via
    `floating_ip_added()` or `floating_ip_removed()`. Also update plugin
    with status of fips.

    :param ri: RouterInfo object of the router being processed.
    :param ex_gw_port: Port dict of the external gateway port.
    :return: None
    :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        DriverException if the configuration operation fails.
    """
    # fips that exist in neutron db (i.e., the desired "truth")
    current_fips = ri.router.get(bc.constants.FLOATINGIP_KEY, [])
    # ids of fips that exist in neutron db
    current_fip_ids = {fip['id'] for fip in current_fips}
    # ids of fips that are configured in device
    configured_fip_ids = {fip['id'] for fip in ri.floating_ips}
    id_to_current_fip_map = {}
    fips_to_add = []
    # iterate over fips that exist in neutron db
    for configured_fip in current_fips:
        if configured_fip['port_id']:
            # store to later check if this fip has been remapped
            id_to_current_fip_map[configured_fip['id']] = configured_fip
            if configured_fip['id'] not in configured_fip_ids:
                # Ensure that we add only after remove, in case same
                # fixed_ip is mapped to different floating_ip within
                # the same loop cycle. If add occurs before first,
                # cfg will fail because of existing entry with
                # identical fixed_ip
                fips_to_add.append(configured_fip)
    fip_ids_to_remove = configured_fip_ids - current_fip_ids
    LOG.debug("fip_ids_to_add: %s" % fips_to_add)
    LOG.debug("fip_ids_to_remove: %s" % fip_ids_to_remove)
    fips_to_remove = []
    fip_statuses = {}
    # iterate over fips that are configured in device
    for configured_fip in ri.floating_ips:
        if configured_fip['id'] in fip_ids_to_remove:
            fips_to_remove.append(configured_fip)
            self._floating_ip_removed(
                ri, ri.ex_gw_port, configured_fip['floating_ip_address'],
                configured_fip['fixed_ip_address'])
            fip_statuses[configured_fip['id']] = (
                bc.constants.FLOATINGIP_STATUS_DOWN)
            LOG.debug("Add to fip_statuses DOWN id:%s fl_ip:%s fx_ip:%s",
                      configured_fip['id'],
                      configured_fip['floating_ip_address'],
                      configured_fip['fixed_ip_address'])
        else:
            # handle possibly required remapping of a fip
            # ip address that fip currently is configured for
            configured_fixed_ip = configured_fip['fixed_ip_address']
            # NOTE(review): this lookup assumes every still-current fip
            # has a port_id (and is therefore in the map) — confirm.
            new_fip = id_to_current_fip_map[configured_fip['id']]
            # ip address that fip should be configured for
            current_fixed_ip = new_fip['fixed_ip_address']
            if (current_fixed_ip and configured_fixed_ip and
                    current_fixed_ip != configured_fixed_ip):
                # fip was remapped: remove the old binding and queue the
                # new one so the remove always precedes the add
                floating_ip = configured_fip['floating_ip_address']
                self._floating_ip_removed(ri, ri.ex_gw_port,
                                          floating_ip, configured_fixed_ip)
                fip_statuses[configured_fip['id']] = (
                    bc.constants.FLOATINGIP_STATUS_DOWN)
                fips_to_remove.append(configured_fip)
                fips_to_add.append(new_fip)
    for configured_fip in fips_to_remove:
        # remove fip from "cache" of fips configured in device
        ri.floating_ips.remove(configured_fip)
    for configured_fip in fips_to_add:
        self._floating_ip_added(ri, ex_gw_port,
                                configured_fip['floating_ip_address'],
                                configured_fip['fixed_ip_address'])
        # add fip to "cache" of fips configured in device
        ri.floating_ips.append(configured_fip)
        fip_statuses[configured_fip['id']] = (
            bc.constants.FLOATINGIP_STATUS_ACTIVE)
        LOG.debug("Add to fip_statuses ACTIVE id:%s fl_ip:%s fx_ip:%s",
                  configured_fip['id'],
                  configured_fip['floating_ip_address'],
                  configured_fip['fixed_ip_address'])
    if fip_statuses:
        LOG.debug("Sending floatingip_statuses_update: %s", fip_statuses)
        self.plugin_rpc.update_floatingip_statuses(
            self.context, ri.router_id, fip_statuses)
Compare floatingips configured in device (i.e., those fips in
the ri.floating_ips "cache") with the router's updated floating ips
(in ri.router.floating_ips) and determine floating_ips which were
added or removed. Notify driver of the change via
`floating_ip_added()` or `floating_ip_removed()`. Also update plugin
with status of fips.
:param ri: RouterInfo object of the router being processed.
:param ex_gw_port: Port dict of the external gateway port.
:return: None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails. | entailment |
def _router_added(self, router_id, router):
    """Handle the addition of a new router.

    Wraps the router dict in a RouterInfo object, registers it in the
    service helper's router_info dictionary, and invokes the device
    driver's `router_added()` hook for non-global routers.

    :param router_id: id of the router
    :param router: router dict
    :return: None
    """
    ri = RouterInfo(router_id, router)
    driver = self.driver_manager.set_driver(router)
    role = router[ROUTER_ROLE_ATTR]
    if role in (c_constants.ROUTER_ROLE_GLOBAL,
                c_constants.ROUTER_ROLE_LOGICAL_GLOBAL):
        # Global and logical-global routers need no VRF on the device,
        # so the driver hook is skipped for them.
        LOG.debug("Skipping router_added device processing for %(id)s as "
                  "its role is %(role)s",
                  {'id': router_id, 'role': role})
    else:
        driver.router_added(ri)
    self.router_info[router_id] = ri
Create a new RouterInfo object for this router and add it to the
service helpers router_info dictionary. Then `router_added()` is
called on the device driver.
:param router_id: id of the router
:param router: router dict
:return: None | entailment |
def _router_removed(self, router_id, deconfigure=True):
    """Operations when a router is removed.

    Get the RouterInfo object corresponding to the router in the service
    helper's router_info dict. If deconfigure is set to True,
    remove this router's configuration from the hosting device.

    :param router_id: id of the router
    :param deconfigure: if True, the router's configuration is deleted
        from the hosting device.
    :return: None
    """
    ri = self.router_info.get(router_id)
    if ri is None:
        LOG.warning("Info for router %s was not found. "
                    "Skipping router removal.", router_id)
        return
    # Empty out ports/fips so that _process_router() deconfigures
    # everything before the router itself is removed.
    ri.router['gw_port'] = None
    ri.router[bc.constants.INTERFACE_KEY] = []
    ri.router[bc.constants.FLOATINGIP_KEY] = []
    try:
        hd = ri.router['hosting_device']
        # We proceed to removing the configuration from the device
        # only if (a) deconfigure is set to True (default)
        # (b) the router's hosting device is reachable.
        if (deconfigure and
                self._dev_status.is_hosting_device_reachable(hd)):
            self._process_router(ri)
            driver = self.driver_manager.get_driver(router_id)
            driver.router_removed(ri)
            self.driver_manager.remove_driver(router_id)
        del self.router_info[router_id]
        self._del_from_removed_routers_cache(router_id)
    except cfg_exceptions.DriverException:
        LOG.warning("Router remove for router_id: %s was incomplete. "
                    "Adding the router to removed_routers list",
                    router_id)
        self._update_removed_routers_cache([router_id])
        # remove this router from updated_routers if it is there. It might
        # end up there too if exception was thrown earlier inside
        # `_process_router()`
        self._del_from_updated_routers_cache(router_id)
    except ncc_errors.SessionCloseError as e:
        LOG.exception("ncclient Unexpected session close %s"
                      " while attempting to remove router", e)
        if not self._dev_status.is_hosting_device_reachable(hd):
            LOG.debug("Lost connectivity to Hosting Device %s" % hd['id'])
            # rely on heartbeat to detect HD state
            # and schedule resync when the device comes back
        else:
            # retry the router removal on the next pass
            self._update_removed_routers_cache([router_id])
            # BUG FIX: the original interpolated only pp.pformat(hd) into
            # a two-placeholder format string and passed router_id as a
            # stray positional argument, raising TypeError whenever this
            # branch executed. Use lazy logger interpolation instead.
            LOG.debug("Interim connectivity lost to hosting device %s, "
                      "enqueuing router %s in removed_routers set",
                      pp.pformat(hd), router_id)
Get the RouterInfo object corresponding to the router in the service
helpers's router_info dict. If deconfigure is set to True,
remove this router's configuration from the hosting device.
:param router_id: id of the router
:param deconfigure: if True, the router's configuration is deleted from
the hosting device.
:return: None | entailment |
def _routes_updated(self, ri):
    """Update the state of routes in the router.

    Compares the current routes with the (configured) existing routes
    and detects what was removed or added. Then configures the
    logical router in the hosting device accordingly.

    :param ri: RouterInfo corresponding to the router.
    :return: None
    :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        DriverException if the configuration operation fails.
    """
    new_routes = ri.router['routes']
    old_routes = ri.routes
    adds, removes = bc.common_utils.diff_list_of_dict(old_routes,
                                                      new_routes)
    for route in adds:
        LOG.debug("Added route entry is '%s'", route)
        # BUG FIX: the original called removes.remove() while iterating
        # `removes`, which can skip entries. Rebuild the list instead,
        # dropping any removed route with the same destination — that
        # route is being replaced, not deleted.
        removes = [del_route for del_route in removes
                   if del_route['destination'] != route['destination']]
        driver = self.driver_manager.get_driver(ri.id)
        driver.routes_updated(ri, 'replace', route)
    for route in removes:
        LOG.debug("Removed route entry is '%s'", route)
        driver = self.driver_manager.get_driver(ri.id)
        driver.routes_updated(ri, 'delete', route)
    # Persist the applied routes for the next comparison.
    ri.routes = new_routes
Compares the current routes with the (configured) existing routes
and detect what was removed or added. Then configure the
logical router in the hosting device accordingly.
:param ri: RouterInfo corresponding to the router.
:return: None
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
DriverException if the configuration operation fails. | entailment |
def get_running_config(self, conn):
    """Fetch the device's current running config over NETCONF.

    :param conn: active NETCONF connection to the device
    :return: the running config split into a list of lines, or None if
        the device returned no config
    """
    reply = conn.get_config(source="running")
    if not reply:
        return None
    # reply._raw is the rpc-reply XML; the config text sits two levels
    # below the root element.
    cfg_element = ET.fromstring(reply._raw)[0][0]
    return re.split(r"\r*\n+", cfg_element.text)
:return: Current IOS running config as multiline string | entailment |
def subintf_real_ip_check_gw_port(self, gw_port, ip_addr, netmask):
    """Validate a sub-interface's IP and netmask against the neutron db
    gateway port.

    :param gw_port: gateway port dict from the neutron db (or None)
    :param ip_addr: IP address derived from the running config
    :param netmask: netmask derived from the running config
    :return: True if ip_addr/netmask match the gateway port, else False
    """
    if gw_port is None:
        return False
    matched_fixed_ip = None
    for fixed_ip in gw_port['fixed_ips']:
        if fixed_ip['ip_address'] == ip_addr:
            matched_fixed_ip = fixed_ip
            break
    if matched_fixed_ip is None:
        LOG.info("Subintf real IP is incorrect, deleting")
        return False
    # Compare the netmask against the cidr of the matching subnet.
    subnet_id = matched_fixed_ip['subnet_id']
    subnet = next(
        sn for sn in gw_port['subnets'] if sn['id'] == subnet_id)
    expected_mask = str(netaddr.IPNetwork(subnet['cidr']).netmask)
    if netmask != expected_mask:
        LOG.info("Subintf has incorrect netmask, deleting")
        return False
    return True
gw_port | entailment |
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2):
    """Retry calling the decorated function using an exponential backoff.

    Reference: http://www.saltycrane.com/blog/2009/11/trying-out-retry
    -decorator-python/

    :param ExceptionToCheck: the exception to check. may be a tuple of
        exceptions to check
    :param tries: number of times to try (not retry) before giving up
    :param delay: initial delay between retries in seconds
    :param backoff: backoff multiplier e.g. value of 2 will double the
        delay each retry
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            remaining, wait = tries, delay
            # Every attempt except the last swallows the exception,
            # sleeps, and grows the delay.
            while remaining > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    LOG.debug("%(err_mess)s. Retry calling function "
                              "'%(f_name)s' in %(delta)d seconds.",
                              {'err_mess': str(e), 'f_name': f.__name__,
                               'delta': wait})
                    time.sleep(wait)
                    remaining -= 1
                    wait *= backoff
            # The final attempt propagates any exception to the caller.
            LOG.debug("Last retry calling function '%s'.", f.__name__)
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
Reference: http://www.saltycrane.com/blog/2009/11/trying-out-retry
-decorator-python/
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:param tries: number of times to try (not retry) before giving up
:param delay: initial delay between retries in seconds
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry | entailment |
def _get_connection(self):
    """Make SSH connection to the IOS XE device.

    The external ncclient library is used for creating this connection.
    This method keeps state of any existing connections and reuses them
    if already connected. Also interfaces (except management) are
    typically disabled by default when it is booted. So if connecting
    for the first time, driver will enable all other interfaces and keep
    that status in the `_itfcs_enabled` flag.

    :return: an established ncclient connection
    :raises: cfg_exc.ConnectionException on any connection failure
    """
    try:
        if self._ncc_connection and self._ncc_connection.connected:
            return self._ncc_connection
        else:
            # ncclient needs 'name' to be 'csr' in order to communicate
            # with the device in the correct way.
            self._ncc_connection = manager.connect(
                host=self._host_ip, port=self._host_ssh_port,
                username=self._username, password=self._password,
                device_params={'name': "csr"}, timeout=self._timeout)
            if not self._itfcs_enabled:
                self._itfcs_enabled = self._enable_itfcs(
                    self._ncc_connection)
            return self._ncc_connection
    except Exception as e:
        # BUG FIX: `e.message` does not exist on Python 3 exceptions
        # (and is deprecated on Python 2) — use str(e) for the reason.
        conn_params = {'host': self._host_ip, 'port': self._host_ssh_port,
                       'user': self._username,
                       'timeout': self._timeout, 'reason': str(e)}
        raise cfg_exc.ConnectionException(**conn_params)
The external ncclient library is used for creating this connection.
This method keeps state of any existing connections and reuses them if
already connected. Also interfaces (except management) are typically
disabled by default when it is booted. So if connecting for the first
time, driver will enable all other interfaces and keep that status in
the `_itfcs_enabled` flag. | entailment |
def _get_interfaces(self):
    """Return the GigabitEthernet interfaces present on this device.

    :return: list of interface names parsed from the running config
    """
    parser = HTParser(self._get_running_config())
    raw_lines = parser.find_lines("^interface GigabitEthernet")
    # each line looks like 'interface GigabitEthernetX'
    interfaces = [line.strip().split(' ')[1] for line in raw_lines]
    LOG.debug("Interfaces on hosting device: %s", interfaces)
    return interfaces
:return: List of the interfaces | entailment |
def _get_interface_ip(self, interface_name):
    """Return the IP address configured on an interface.

    :param interface_name: interface_name as a string
    :return: the IP address as a string, or None if the interface (or
        its ip address line) is not found
    """
    parser = HTParser(self._get_running_config())
    for cfg_line in parser.find_children("^interface %s" % interface_name):
        if 'ip address' in cfg_line:
            # line looks like ' ip address <addr> <mask>'
            addr = cfg_line.strip().split(' ')[2]
            LOG.debug("IP Address:%s", addr)
            return addr
    LOG.warning("Cannot find interface: %s", interface_name)
    return None
:param interface_name: interface_name as a string
:return: ip address of interface as a string | entailment |
def _interface_exists(self, interface):
    """Return True if the named interface is in the running config."""
    parser = HTParser(self._get_running_config())
    matches = parser.find_lines("^interface " + interface)
    return bool(matches)
def _get_vrfs(self):
    """Return the VRFs currently configured in the device.

    :return: a list of VRF names as strings
    """
    parser = HTParser(self._get_running_config())
    # each matching line looks like 'vrf definition <vrf-name>'
    vrfs = [line.strip().split(' ')[2]
            for line in parser.find_lines("^vrf definition")]
    LOG.info("VRFs:%s", vrfs)
    return vrfs
:return: A list of vrf names as string | entailment |
def _get_capabilities(self):
    """Return the server's NETCONF capabilities.

    :return: list of server capability strings
    """
    conn = self._get_connection()
    capabilities = list(conn.server_capabilities)
    LOG.debug("Server capabilities: %s", capabilities)
    return capabilities
:return: List of server capabilities. | entailment |
def _get_running_config(self, split=True):
    """Fetch the IOS XE device's current running config.

    :param split: if True, return the config as a list of lines;
        otherwise return it as one multiline string
    :return: the running config, or None if the device returned nothing
    """
    reply = self._get_connection().get_config(source="running")
    if not reply:
        return None
    # reply._raw is the rpc-reply XML; the config text sits two levels
    # below the root element.
    cfg_text = ET.fromstring(reply._raw)[0][0].text
    if split is True:
        return re.split(r"\r*\n+", cfg_text)
    return cfg_text
:return: Current IOS running config as multiline string | entailment |
def _check_acl(self, acl_no, network, netmask):
    """Check that an ACL entry exists in the running config.

    :param acl_no: access control list (ACL) number
    :param network: network which this ACL permits
    :param netmask: netmask of the network
    :return: True if the ACL and its permit line are present, else False
    """
    acl_header = 'ip access-list standard ' + str(acl_no)
    permit_line = ' permit ' + str(network) + ' ' + str(netmask)
    parser = HTParser(self._get_running_config())
    acl_children = parser.find_children(acl_header)
    if not acl_children:
        LOG.debug("%s is not present in config", acl_no)
        return False
    if permit_line in acl_children:
        return True
    LOG.error("Mismatch in ACL configuration for %s", acl_no)
    return False
:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return: | entailment |
def _cfg_exists(self, cfg_str):
    """Check whether a line starting with cfg_str exists in the running
    config.

    :param cfg_str: config string to check
    :return: True if at least one matching line is found, else False
    """
    parser = HTParser(self._get_running_config())
    matches = parser.find_lines("^" + cfg_str)
    LOG.debug("_cfg_exists(): Found lines %s", matches)
    return bool(matches)
:param cfg_str: config string to check
:return : True or False | entailment |
def caller_name(self, skip=2):
    """Return the name of a caller in the format module.class.method.

    `skip` specifies how many levels of stack to skip while getting the
    caller name: skip=1 means "who calls me", skip=2 "who calls my
    caller", etc. An empty string is returned if the skipped levels
    exceed the stack height.
    """
    frames = inspect.stack()
    target = 0 + skip
    if len(frames) < target + 1:
        return ''
    frame = frames[target][0]
    parts = []
    module = inspect.getmodule(frame)
    # module can be None when the frame is executed directly in console
    # TODO(asr1kteam): consider using __main__
    if module:
        parts.append(module.__name__)
    if 'self' in frame.f_locals:
        # Bound-method call: include the class name. Static-method calls
        # cannot be distinguished from plain function calls here.
        parts.append(frame.f_locals['self'].__class__.__name__)
    code_name = frame.f_code.co_name
    if code_name != '<module>':  # top level usually
        parts.append(code_name)  # function or a method
    del frame
    return ".".join(parts)
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height | entailment |
def _check_response(self, rpc_obj, snippet_name, conf_str=None):
    """Check the rpc response object for the status of an edit operation.

    Takes the response rpc_obj and the name of the snippet that was
    executed, and parses the reply to decide whether the last edit
    operation succeeded. A success reply looks like:

        <?xml version="1.0" encoding="UTF-8"?>
        <rpc-reply message-id="urn:uuid:..."
            xmlns="urn:ietf:params:netconf:base:1.0">
            <ok />
        </rpc-reply>

    On error the IOS XE device instead returns an <rpc-error> element
    whose children carry error-type, error-tag and error-severity; the
    type and tag are extracted and reported.

    :return: True if the config operation completed successfully
    :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        IOSXEConfigException
    """
    LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s",
              {'snippet_name': snippet_name, 'rpc_obj': rpc_obj.xml})
    reply_xml = rpc_obj.xml
    if "<ok />" in reply_xml:
        LOG.info("%s was successfully executed", snippet_name)
        return True
    # Not OK: pull the error type and tag out of the reply and raise.
    error_type = rpc_obj._root[0][0].text
    error_tag = rpc_obj._root[0][1].text
    params = {'snippet': snippet_name, 'type': error_type,
              'tag': error_tag, 'dev_id': self.hosting_device['id'],
              'ip': self._host_ip, 'confstr': conf_str}
    raise cfg_exc.IOSXEConfigException(**params)
This function takes as input the response rpc_obj and the snippet name
that was executed. It parses it to see, if the last edit operation was
a success or not.
<?xml version="1.0" encoding="UTF-8"?>
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
xmlns="urn:ietf:params:netconf:base:1.0">
<ok />
</rpc-reply>
In case of error, IOS XE device sends a response as follows.
We take the error type and tag.
<?xml version="1.0" encoding="UTF-8"?>
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
xmlns="urn:ietf:params:netconf:base:1.0">
<rpc-error>
<error-type>protocol</error-type>
<error-tag>operation-failed</error-tag>
<error-severity>error</error-severity>
</rpc-error>
</rpc-reply>
:return: True if the config operation completed successfully
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
IOSXEConfigException | entailment |
def _get_instances_for_project(self, project_id):
"""Return all instances for a given project.
:project_id: UUID of project (tenant)
"""
search_opts = {'marker': None,
'all_tenants': True,
'project_id': project_id}
try:
servers = self._novaclnt.servers.list(True, search_opts)
LOG.debug('_get_instances_for_project: servers=%s', servers)
return servers
except nexc.Unauthorized:
emsg = (_LE('Failed to get novaclient:Unauthorised '
'project_id=%(proj)s user=%(user)s'),
{'proj': self._project_id, 'user': self._user_name})
LOG.exception(emsg)
raise nexc.ClientException(emsg)
except nexc.AuthorizationFailure as err:
emsg = (_LE("Failed to get novaclient %s"))
LOG.exception(emsg, err)
raise nexc.ClientException(emsg % err) | Return all instances for a given project.
:project_id: UUID of project (tenant) | entailment |
def get_instance_for_uuid(self, uuid, project_id):
"""Return instance name for given uuid of an instance and project.
:uuid: Instance's UUID
:project_id: UUID of project (tenant)
"""
instance_name = self._inst_info_cache.get((uuid, project_id))
if instance_name:
return instance_name
instances = self._get_instances_for_project(project_id)
for inst in instances:
if inst.id.replace('-', '') == uuid:
LOG.debug('get_instance_for_uuid: name=%s', inst.name)
instance_name = inst.name
self._inst_info_cache[(uuid, project_id)] = instance_name
return instance_name
return instance_name | Return instance name for given uuid of an instance and project.
:uuid: Instance's UUID
:project_id: UUID of project (tenant) | entailment |
def l3_tenant_id(cls):
"""Returns id of tenant owning hosting device resources."""
if cls._l3_tenant_uuid is None:
if hasattr(cfg.CONF.keystone_authtoken, 'project_domain_id'):
            # TODO(sridar): hack for now to determine if keystone v3
# API is to be used.
cls._l3_tenant_uuid = cls._get_tenant_id_using_keystone_v3()
else:
cls._l3_tenant_uuid = cls._get_tenant_id_using_keystone_v2()
return cls._l3_tenant_uuid | Returns id of tenant owning hosting device resources. | entailment |
def mgmt_nw_id(cls):
"""Returns id of the management network."""
if cls._mgmt_nw_uuid is None:
tenant_id = cls.l3_tenant_id()
if not tenant_id:
return
net = bc.get_plugin().get_networks(
bc.context.get_admin_context(),
{'tenant_id': [tenant_id],
'name': [cfg.CONF.general.management_network]},
['id', 'subnets'])
if len(net) == 1:
num_subnets = len(net[0]['subnets'])
if num_subnets == 0:
LOG.error('The management network has no subnet. '
'Please assign one.')
return
elif num_subnets > 1:
LOG.info('The management network has %d subnets. The '
'first one will be used.', num_subnets)
cls._mgmt_nw_uuid = net[0].get('id')
cls._mgmt_subnet_uuid = net[0]['subnets'][0]
elif len(net) > 1:
# Management network must have a unique name.
            LOG.error('The management network does not have a '
                      'unique name. Please ensure that it does.')
else:
# Management network has not been created.
LOG.error('There is no virtual management network. Please '
'create one.')
return cls._mgmt_nw_uuid | Returns id of the management network. | entailment |
def mgmt_sec_grp_id(cls):
"""Returns id of security group used by the management network."""
if not extensions.is_extension_supported(bc.get_plugin(),
"security-group"):
return
if cls._mgmt_sec_grp_id is None:
# Get the id for the _mgmt_security_group_id
tenant_id = cls.l3_tenant_id()
res = bc.get_plugin().get_security_groups(
bc.context.get_admin_context(),
{'tenant_id': [tenant_id],
'name': [cfg.CONF.general.default_security_group]},
['id'])
if len(res) == 1:
sec_grp_id = res[0].get('id', None)
cls._mgmt_sec_grp_id = sec_grp_id
elif len(res) > 1:
# the mgmt sec group must be unique.
LOG.error('The security group for the management network '
                      'does not have a unique name. Please ensure '
                      'that it does.')
else:
# Service VM Mgmt security group is not present.
LOG.error('There is no security group for the management '
'network. Please create one.')
return cls._mgmt_sec_grp_id | Returns id of security group used by the management network. | entailment |
def get_hosting_device_driver(self, context, id):
"""Returns device driver for hosting device template with <id>."""
if id is None:
return
try:
return self._hosting_device_drivers[id]
except KeyError:
try:
template = self._get_hosting_device_template(context, id)
self._hosting_device_drivers[id] = importutils.import_object(
template['device_driver'])
except (ImportError, TypeError, n_exc.NeutronException):
LOG.exception("Error loading hosting device driver for "
"hosting device template %s", id)
return self._hosting_device_drivers.get(id) | Returns device driver for hosting device template with <id>. | entailment |
def get_hosting_device_plugging_driver(self, context, id):
"""Returns plugging driver for hosting device template with <id>."""
if id is None:
return
try:
return self._plugging_drivers[id]
except KeyError:
try:
template = self._get_hosting_device_template(context, id)
self._plugging_drivers[id] = importutils.import_object(
template['plugging_driver'])
except (ImportError, TypeError, n_exc.NeutronException):
LOG.exception("Error loading plugging driver for hosting "
"device template %s", id)
return self._plugging_drivers.get(id) | Returns plugging driver for hosting device template with <id>. | entailment |
def acquire_hosting_device_slots(self, context, hosting_device, resource,
resource_type, resource_service, num,
exclusive=False):
"""Assign <num> slots in <hosting_device> to logical <resource>.
If exclusive is True the hosting device is bound to the resource's
tenant. Otherwise it is not bound to any tenant.
Returns True if allocation was granted, False otherwise.
"""
bound = hosting_device['tenant_bound']
if ((bound is not None and bound != resource['tenant_id']) or
(exclusive and not self._exclusively_used(context, hosting_device,
resource['tenant_id']))):
LOG.debug(
'Rejecting allocation of %(num)d slots in tenant %(bound)s '
'hosting device %(device)s to logical resource %(r_id)s due '
'to exclusive use conflict.',
{'num': num,
'bound': 'unbound' if bound is None else bound + ' bound',
'device': hosting_device['id'], 'r_id': resource['id']})
return False
with context.session.begin(subtransactions=True):
res_info = {'resource': resource, 'type': resource_type,
'service': resource_service}
slot_info, query = self._get_or_create_slot_allocation(
context, hosting_device, res_info)
if slot_info is None:
LOG.debug('Rejecting allocation of %(num)d slots in hosting '
'device %(device)s to logical resource %(r_id)s',
{'num': num, 'device': hosting_device['id'],
'r_id': resource['id']})
return False
new_allocation = num + slot_info.num_allocated
if hosting_device['template']['slot_capacity'] < new_allocation:
LOG.debug('Rejecting allocation of %(num)d slots in '
'hosting device %(device)s to logical resource '
                          '%(r_id)s due to insufficient slot availability.',
{'num': num, 'device': hosting_device['id'],
'r_id': resource['id']})
self._dispatch_pool_maintenance_job(hosting_device['template'])
return False
# handle any changes to exclusive usage by tenant
if exclusive and bound is None:
self._update_hosting_device_exclusivity(
context, hosting_device, resource['tenant_id'])
bound = resource['tenant_id']
elif not exclusive and bound is not None:
self._update_hosting_device_exclusivity(context,
hosting_device, None)
bound = None
slot_info.num_allocated = new_allocation
context.session.add(slot_info)
self._dispatch_pool_maintenance_job(hosting_device['template'])
# report success
LOG.info('Allocated %(num)d additional slots in tenant %(bound)s'
'bound hosting device %(hd_id)s. In total %(total)d '
'slots are now allocated in that hosting device for '
'logical resource %(r_id)s.',
{'num': num, 'bound': 'un-' if bound is None else bound + ' ',
'total': new_allocation, 'hd_id': hosting_device['id'],
'r_id': resource['id']})
return True | Assign <num> slots in <hosting_device> to logical <resource>.
If exclusive is True the hosting device is bound to the resource's
tenant. Otherwise it is not bound to any tenant.
Returns True if allocation was granted, False otherwise. | entailment |
def release_hosting_device_slots(self, context, hosting_device, resource,
num):
"""Free <num> slots in <hosting_device> from logical resource <id>.
Returns True if deallocation was successful. False otherwise.
"""
with context.session.begin(subtransactions=True):
num_str = str(num) if num >= 0 else "all"
res_info = {'resource': resource}
slot_info, query = self._get_or_create_slot_allocation(
context, hosting_device, res_info, create=False)
if slot_info is None:
LOG.debug('Rejecting de-allocation of %(num)s slots in '
'hosting device %(device)s for logical resource '
'%(id)s', {'num': num_str,
'device': hosting_device['id'],
'id': resource['id']})
return False
if num >= 0:
new_allocation = slot_info.num_allocated - num
else:
# if a negative num is specified all slot allocations for
# the logical resource in the hosting device is removed
new_allocation = 0
if new_allocation < 0:
LOG.debug('Rejecting de-allocation of %(num)s slots in '
'hosting device %(device)s for logical resource '
'%(id)s since only %(alloc)d slots are allocated.',
{'num': num_str, 'device': hosting_device['id'],
'id': resource['id'],
'alloc': slot_info.num_allocated})
self._dispatch_pool_maintenance_job(hosting_device['template'])
return False
elif new_allocation == 0:
result = query.delete()
LOG.info('De-allocated %(num)s slots from hosting device '
'%(hd_id)s. %(total)d slots are now allocated in '
'that hosting device.',
{'num': num_str, 'total': new_allocation,
'hd_id': hosting_device['id']})
if (hosting_device['tenant_bound'] is not None and
context.session.query(hd_models.SlotAllocation).filter_by(
hosting_device_id=hosting_device['id']).first() is
None):
# make hosting device tenant unbound if no logical
# resource use it anymore
hosting_device['tenant_bound'] = None
context.session.add(hosting_device)
LOG.info('Making hosting device %(hd_id)s with no '
'allocated slots tenant unbound.',
{'hd_id': hosting_device['id']})
self._dispatch_pool_maintenance_job(hosting_device['template'])
return result == 1
LOG.info('De-allocated %(num)s slots from hosting device '
'%(hd_id)s. %(total)d slots are now allocated in '
'that hosting device.',
{'num': num_str, 'total': new_allocation,
'hd_id': hosting_device['id']})
slot_info.num_allocated = new_allocation
context.session.add(slot_info)
self._dispatch_pool_maintenance_job(hosting_device['template'])
# report success
return True | Free <num> slots in <hosting_device> from logical resource <id>.
Returns True if deallocation was successful. False otherwise. | entailment |
def get_hosting_devices_qry(self, context, hosting_device_ids,
load_agent=True):
"""Returns hosting devices with <hosting_device_ids>."""
query = context.session.query(hd_models.HostingDevice)
if load_agent:
query = query.options(joinedload('cfg_agent'))
if len(hosting_device_ids) > 1:
query = query.filter(hd_models.HostingDevice.id.in_(
hosting_device_ids))
else:
query = query.filter(hd_models.HostingDevice.id ==
hosting_device_ids[0])
return query | Returns hosting devices with <hosting_device_ids>. | entailment |
def delete_all_hosting_devices(self, context, force_delete=False):
"""Deletes all hosting devices."""
for item in self._get_collection_query(
context, hd_models.HostingDeviceTemplate):
self.delete_all_hosting_devices_by_template(
context, template=item, force_delete=force_delete) | Deletes all hosting devices. | entailment |
def delete_all_hosting_devices_by_template(self, context, template,
force_delete=False):
"""Deletes all hosting devices based on <template>."""
plugging_drv = self.get_hosting_device_plugging_driver(
context, template['id'])
hosting_device_drv = self.get_hosting_device_driver(context,
template['id'])
if plugging_drv is None or hosting_device_drv is None:
return
is_vm = template['host_category'] == VM_CATEGORY
query = context.session.query(hd_models.HostingDevice)
query = query.filter(hd_models.HostingDevice.template_id ==
template['id'])
for hd in query:
if not (hd.auto_delete or force_delete):
# device manager is not responsible for life cycle
# management of this hosting device.
continue
res = plugging_drv.get_hosting_device_resources(
context, hd.id, hd.complementary_id, self.l3_tenant_id(),
self.mgmt_nw_id())
if is_vm:
self.svc_vm_mgr.delete_service_vm(context, hd.id)
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
with context.session.begin(subtransactions=True):
# remove all allocations in this hosting device
context.session.query(hd_models.SlotAllocation).filter_by(
hosting_device_id=hd['id']).delete()
context.session.delete(hd) | Deletes all hosting devices based on <template>. | entailment |
def get_device_info_for_agent(self, context, hosting_device_db):
"""Returns information about <hosting_device> needed by config agent.
Convenience function that service plugins can use to populate
their resources with information about the device hosting their
logical resource.
"""
template = hosting_device_db.template
mgmt_port = hosting_device_db.management_port
mgmt_ip = (mgmt_port['fixed_ips'][0]['ip_address']
if mgmt_port else hosting_device_db.management_ip_address)
return {'id': hosting_device_db.id,
'name': template.name,
'template_id': template.id,
'credentials': self._get_credentials(hosting_device_db),
'host_category': template.host_category,
'admin_state_up': hosting_device_db.admin_state_up,
'service_types': template.service_types,
'management_ip_address': mgmt_ip,
'protocol_port': hosting_device_db.protocol_port,
'timeout': None,
'created_at': str(hosting_device_db.created_at),
'status': hosting_device_db.status,
'booting_time': template.booting_time} | Returns information about <hosting_device> needed by config agent.
Convenience function that service plugins can use to populate
their resources with information about the device hosting their
logical resource. | entailment |
def _process_non_responsive_hosting_device(self, context, hosting_device):
"""Host type specific processing of non responsive hosting devices.
:param hosting_device: db object for hosting device
:return: True if hosting_device has been deleted, otherwise False
"""
if (hosting_device['template']['host_category'] == VM_CATEGORY and
hosting_device['auto_delete']):
self._delete_dead_service_vm_hosting_device(context,
hosting_device)
return True
return False | Host type specific processing of non responsive hosting devices.
:param hosting_device: db object for hosting device
:return: True if hosting_device has been deleted, otherwise False | entailment |
def _maintain_hosting_device_pool(self, context, template):
"""Maintains the pool of hosting devices that are based on <template>.
Ensures that the number of standby hosting devices (essentially
service VMs) is kept at a suitable level so that resource creation is
not slowed down by booting of the hosting device.
:param context: context for this operation
:param template: db object for hosting device template
"""
#TODO(bobmel): Support HA/load-balanced Neutron servers:
#TODO(bobmel): Locking across multiple running Neutron server instances
lock = self._get_template_pool_lock(template['id'])
acquired = lock.acquire(False)
if not acquired:
# pool maintenance for this template already ongoing, so abort
return
try:
# Maintain a pool of approximately 'desired_slots_free' available
# for allocation. Approximately means that
# abs(desired_slots_free-capacity) <= available_slots <=
# desired_slots_free+capacity
capacity = template['slot_capacity']
if capacity == 0:
return
desired = template['desired_slots_free']
available = self._get_total_available_slots(
context, template['id'], capacity)
grow_threshold = abs(desired - capacity)
if available <= grow_threshold:
num_req = int(math.ceil(grow_threshold / (1.0 * capacity)))
num_created = len(self._create_svc_vm_hosting_devices(
context, num_req, template))
if num_created < num_req:
LOG.warning('Requested %(requested)d instances based '
'on hosting device template %(template)s '
'but could only create %(created)d '
'instances',
{'requested': num_req,
'template': template['id'],
'created': num_created})
elif available >= desired + capacity:
num_req = int(
math.floor((available - desired) / (1.0 * capacity)))
num_deleted = self._delete_idle_service_vm_hosting_devices(
context, num_req, template)
if num_deleted < num_req:
LOG.warning('Tried to delete %(requested)d instances '
'based on hosting device template '
'%(template)s but could only delete '
'%(deleted)d instances',
{'requested': num_req, 'template': template['id'],
'deleted': num_deleted})
finally:
lock.release() | Maintains the pool of hosting devices that are based on <template>.
Ensures that the number of standby hosting devices (essentially
service VMs) is kept at a suitable level so that resource creation is
not slowed down by booting of the hosting device.
:param context: context for this operation
:param template: db object for hosting device template | entailment |
def _create_svc_vm_hosting_devices(self, context, num, template):
"""Creates <num> or less service VM instances based on <template>.
These hosting devices can be bound to a certain tenant or for shared
use. A list with the created hosting device VMs is returned.
"""
hosting_devices = []
template_id = template['id']
credentials_id = template['default_credentials_id']
plugging_drv = self.get_hosting_device_plugging_driver(context,
template_id)
hosting_device_drv = self.get_hosting_device_driver(context,
template_id)
if plugging_drv is None or hosting_device_drv is None or num <= 0:
return hosting_devices
#TODO(bobmel): Determine value for max_hosted properly
max_hosted = 1 # template['slot_capacity']
dev_data, mgmt_context = self._get_resources_properties_for_hd(
template, credentials_id)
credentials_info = self._credentials.get(credentials_id)
if credentials_info is None:
            LOG.error('Could not find credentials for hosting device '
'template %s. Aborting VM hosting device creation.',
template_id)
return hosting_devices
connectivity_info = self._get_mgmt_connectivity_info(
context, self.mgmt_subnet_id())
for i in range(num):
complementary_id = uuidutils.generate_uuid()
res = plugging_drv.create_hosting_device_resources(
context, complementary_id, self.l3_tenant_id(), mgmt_context,
max_hosted)
if res.get('mgmt_port') is None:
# Required ports could not be created
return hosting_devices
connectivity_info['mgmt_port'] = res['mgmt_port']
vm_instance = self.svc_vm_mgr.dispatch_service_vm(
context, template['name'] + '_nrouter', template['image'],
template['flavor'], hosting_device_drv, credentials_info,
connectivity_info, res.get('ports'))
if vm_instance is not None:
dev_data.update(
{'id': vm_instance['id'],
'complementary_id': complementary_id,
'management_ip_address': res['mgmt_port'][
'fixed_ips'][0]['ip_address'],
'management_port_id': res['mgmt_port']['id']})
self.create_hosting_device(context,
{'hosting_device': dev_data})
hosting_devices.append(vm_instance)
else:
# Fundamental error like could not contact Nova
# Cleanup anything we created
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
break
LOG.info('Created %(num)d hosting device VMs based on template '
'%(t_id)s', {'num': len(hosting_devices),
't_id': template_id})
return hosting_devices | Creates <num> or less service VM instances based on <template>.
These hosting devices can be bound to a certain tenant or for shared
use. A list with the created hosting device VMs is returned. | entailment |
def _delete_idle_service_vm_hosting_devices(self, context, num, template):
"""Deletes <num> or less unused <template>-based service VM instances.
The number of deleted service vm instances is returned.
"""
# Delete the "youngest" hosting devices since they are more likely
# not to have finished booting
num_deleted = 0
plugging_drv = self.get_hosting_device_plugging_driver(context,
template['id'])
hosting_device_drv = self.get_hosting_device_driver(context,
template['id'])
if plugging_drv is None or hosting_device_drv is None or num <= 0:
return num_deleted
query = context.session.query(hd_models.HostingDevice)
query = query.outerjoin(
hd_models.SlotAllocation,
hd_models.HostingDevice.id ==
hd_models.SlotAllocation.hosting_device_id)
query = query.filter(hd_models.HostingDevice.template_id ==
template['id'],
hd_models.HostingDevice.admin_state_up ==
expr.true(),
hd_models.HostingDevice.tenant_bound ==
expr.null(),
hd_models.HostingDevice.auto_delete ==
expr.true())
query = query.group_by(hd_models.HostingDevice.id).having(
func.count(hd_models.SlotAllocation.logical_resource_id) == 0)
query = query.order_by(
hd_models.HostingDevice.created_at.desc(),
func.count(hd_models.SlotAllocation.logical_resource_id))
hd_candidates = query.all()
num_possible_to_delete = min(len(hd_candidates), num)
for i in range(num_possible_to_delete):
res = plugging_drv.get_hosting_device_resources(
context, hd_candidates[i]['id'],
hd_candidates[i]['complementary_id'], self.l3_tenant_id(),
self.mgmt_nw_id())
if self.svc_vm_mgr.delete_service_vm(context,
hd_candidates[i]['id']):
with context.session.begin(subtransactions=True):
context.session.delete(hd_candidates[i])
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
num_deleted += 1
LOG.info('Deleted %(num)d hosting devices based on template '
'%(t_id)s', {'num': num_deleted, 't_id': template['id']})
return num_deleted | Deletes <num> or less unused <template>-based service VM instances.
The number of deleted service vm instances is returned. | entailment |
def _delete_dead_service_vm_hosting_device(self, context, hosting_device):
"""Deletes a presumably dead <hosting_device> service VM.
This will indirectly make all of its hosted resources unscheduled.
"""
if hosting_device is None:
return
plugging_drv = self.get_hosting_device_plugging_driver(
context, hosting_device['template_id'])
hosting_device_drv = self.get_hosting_device_driver(
context, hosting_device['template_id'])
if plugging_drv is None or hosting_device_drv is None:
return
res = plugging_drv.get_hosting_device_resources(
context, hosting_device['id'], hosting_device['complementary_id'],
self.l3_tenant_id(), self.mgmt_nw_id())
if not self.svc_vm_mgr.delete_service_vm(context,
hosting_device['id']):
LOG.error('Failed to delete hosting device %s service VM. '
'Will un-register it anyway.',
hosting_device['id'])
plugging_drv.delete_hosting_device_resources(
context, self.l3_tenant_id(), **res)
with context.session.begin(subtransactions=True):
# remove all allocations in this hosting device
context.session.query(hd_models.SlotAllocation).filter_by(
hosting_device_id=hosting_device['id']).delete()
context.session.delete(hosting_device) | Deletes a presumably dead <hosting_device> service VM.
This will indirectly make all of its hosted resources unscheduled. | entailment |
def _get_total_available_slots(self, context, template_id, capacity):
"""Returns available slots in idle devices based on <template_id>.
Only slots in tenant unbound hosting devices are counted to ensure
there is always hosting device slots available regardless of tenant.
"""
query = context.session.query(hd_models.HostingDevice.id)
query = query.outerjoin(
hd_models.SlotAllocation,
hd_models.HostingDevice.id == hd_models.SlotAllocation
.hosting_device_id)
query = query.filter(
hd_models.HostingDevice.template_id == template_id,
hd_models.HostingDevice.admin_state_up == expr.true(),
hd_models.HostingDevice.tenant_bound == expr.null())
query = query.group_by(hd_models.HostingDevice.id)
query = query.having(
func.sum(hd_models.SlotAllocation.num_allocated) == expr.null())
num_hosting_devices = query.count()
return num_hosting_devices * capacity | Returns available slots in idle devices based on <template_id>.
Only slots in tenant unbound hosting devices are counted to ensure
there is always hosting device slots available regardless of tenant. | entailment |
def _exclusively_used(self, context, hosting_device, tenant_id):
"""Checks if only <tenant_id>'s resources use <hosting_device>."""
return (context.session.query(hd_models.SlotAllocation).filter(
hd_models.SlotAllocation.hosting_device_id == hosting_device['id'],
hd_models.SlotAllocation.logical_resource_owner != tenant_id).
first() is None) | Checks if only <tenant_id>'s resources use <hosting_device>. | entailment |
def _update_hosting_device_exclusivity(self, context, hosting_device,
tenant_id):
"""Make <hosting device> bound or unbound to <tenant_id>.
If <tenant_id> is None the device is unbound, otherwise it gets bound
to that <tenant_id>
"""
with context.session.begin(subtransactions=True):
hosting_device['tenant_bound'] = tenant_id
context.session.add(hosting_device)
for item in (context.session.query(hd_models.SlotAllocation).
filter_by(hosting_device_id=hosting_device['id'])):
item['tenant_bound'] = tenant_id
context.session.add(item) | Make <hosting device> bound or unbound to <tenant_id>.
If <tenant_id> is None the device is unbound, otherwise it gets bound
to that <tenant_id> | entailment |
def _get_template_pool_lock(self, id):
"""Returns lock object for hosting device template with <id>."""
try:
return self._hosting_device_locks[id]
except KeyError:
self._hosting_device_locks[id] = threading.Lock()
return self._hosting_device_locks.get(id) | Returns lock object for hosting device template with <id>. | entailment |
def _create_hosting_device_templates_from_config(self):
"""To be called late during plugin initialization so that any hosting
        device templates defined in the config file are properly inserted in
the DB.
"""
hdt_dict = config.get_specific_config('cisco_hosting_device_template')
attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[
ciscohostingdevicemanager.DEVICE_TEMPLATES]
adm_context = bc.context.get_admin_context()
for hdt_uuid, kv_dict in hdt_dict.items():
# ensure hdt_uuid is properly formatted
hdt_uuid = config.uuidify(hdt_uuid)
try:
self.get_hosting_device_template(adm_context, hdt_uuid)
is_create = False
except ciscohostingdevicemanager.HostingDeviceTemplateNotFound:
is_create = True
kv_dict['id'] = hdt_uuid
kv_dict['tenant_id'] = self.l3_tenant_id()
config.verify_resource_dict(kv_dict, True, attr_info)
hdt = {ciscohostingdevicemanager.DEVICE_TEMPLATE: kv_dict}
try:
if is_create:
self.create_hosting_device_template(adm_context, hdt)
else:
self.update_hosting_device_template(adm_context,
kv_dict['id'], hdt)
except n_exc.NeutronException:
with excutils.save_and_reraise_exception():
LOG.error('Invalid hosting device template definition '
'in configuration file for template = %s',
hdt_uuid) | To be called late during plugin initialization so that any hosting
device templates defined in the config file are properly inserted in
the DB. | entailment |
def _create_hosting_devices_from_config(self):
"""To be called late during plugin initialization so that any hosting
device specified in the config file is properly inserted in the DB.
"""
hd_dict = config.get_specific_config('cisco_hosting_device')
attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[
ciscohostingdevicemanager.DEVICES]
adm_context = bc.context.get_admin_context()
for hd_uuid, kv_dict in hd_dict.items():
# ensure hd_uuid is properly formatted
hd_uuid = config.uuidify(hd_uuid)
try:
old_hd = self.get_hosting_device(adm_context, hd_uuid)
is_create = False
except ciscohostingdevicemanager.HostingDeviceNotFound:
old_hd = {}
is_create = True
kv_dict['id'] = hd_uuid
kv_dict['tenant_id'] = self.l3_tenant_id()
# make sure we keep using same config agent if it has been assigned
kv_dict['cfg_agent_id'] = old_hd.get('cfg_agent_id')
# make sure we keep using management port if it exists
kv_dict['management_port_id'] = old_hd.get('management_port_id')
config.verify_resource_dict(kv_dict, True, attr_info)
hd = {ciscohostingdevicemanager.DEVICE: kv_dict}
try:
if is_create:
self.create_hosting_device(adm_context, hd)
else:
self.update_hosting_device(adm_context, kv_dict['id'], hd)
except n_exc.NeutronException:
with excutils.save_and_reraise_exception():
LOG.error('Invalid hosting device specification in '
'configuration file for device = %s',
hd_uuid) | To be called late during plugin initialization so that any hosting
device specified in the config file is properly inserted in the DB. | entailment |
def add_router_to_hosting_device(self, client, hosting_device_id, body):
"""Adds a router to hosting device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.post((res_path + DEVICE_L3_ROUTERS) %
hosting_device_id, body=body) | Adds a router to hosting device. | entailment |
def remove_router_from_hosting_device(self, client, hosting_device_id,
router_id):
"""Remove a router from hosting_device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.delete((res_path + DEVICE_L3_ROUTERS + "/%s") % (
hosting_device_id, router_id)) | Remove a router from hosting_device. | entailment |
def list_routers_on_hosting_device(self, client, hosting_device_id,
**_params):
"""Fetches a list of routers hosted on a hosting device."""
res_path = hostingdevice.HostingDevice.resource_path
return client.get((res_path + DEVICE_L3_ROUTERS) %
hosting_device_id, params=_params) | Fetches a list of routers hosted on a hosting device. | entailment |
def list_hosting_devices_hosting_routers(self, client, router_id,
**_params):
"""Fetches a list of hosting devices hosting a router."""
return client.get((client.router_path + L3_ROUTER_DEVICES) %
router_id, params=_params) | Fetches a list of hosting devices hosting a router. | entailment |
def setup_client_rpc(self):
"""Setup RPC client for dfa agent."""
# Setup RPC client.
self.clnt = rpc.DfaRpcClient(self._url, constants.DFA_SERVER_QUEUE,
exchange=constants.DFA_EXCHANGE) | Setup RPC client for dfa agent. | entailment |
def setup_rpc(self):
"""Setup RPC server for dfa agent."""
endpoints = RpcCallBacks(self._vdpm, self._iptd)
self.server = rpc.DfaRpcServer(self._qn, self._my_host, self._url,
endpoints,
exchange=constants.DFA_EXCHANGE) | Setup RPC server for dfa agent. | entailment |
def _add_rid_to_vrf_list(self, ri):
"""Add router ID to a VRF list.
In order to properly manage VRFs in the ASR, their
usage has to be tracked. VRFs are provided with neutron
router objects in their hosting_info fields of the gateway ports.
This means that the VRF is only available when the gateway port
of the router is set. VRFs can span routers, and even OpenStack
tenants, so lists of routers that belong to the same VRF are
kept in a dictionary, with the VRF name as the key.
"""
if ri.ex_gw_port or ri.router.get('gw_port'):
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
if not vrf_name:
return
if not self._router_ids_by_vrf.get(vrf_name):
LOG.debug("++ CREATING VRF %s" % vrf_name)
driver._do_create_vrf(vrf_name)
self._router_ids_by_vrf.setdefault(vrf_name, set()).add(
ri.router['id']) | Add router ID to a VRF list.
In order to properly manage VRFs in the ASR, their
usage has to be tracked. VRFs are provided with neutron
router objects in their hosting_info fields of the gateway ports.
This means that the VRF is only available when the gateway port
of the router is set. VRFs can span routers, and even OpenStack
tenants, so lists of routers that belong to the same VRF are
kept in a dictionary, with the VRF name as the key. | entailment |
def _remove_rid_from_vrf_list(self, ri):
"""Remove router ID from a VRF list.
This removes a router from the list of routers that's kept
in a map, using a VRF ID as the key. If the VRF exists, the
router is removed from the list if it's present. If the last
router in the list is removed, then the driver's method to
remove the VRF is called and the map entry for that
VRF is deleted.
"""
if ri.ex_gw_port or ri.router.get('gw_port'):
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
if self._router_ids_by_vrf.get(vrf_name) and (
ri.router['id'] in self._router_ids_by_vrf[vrf_name]):
self._router_ids_by_vrf[vrf_name].remove(ri.router['id'])
# If this is the last router in a VRF, then we can safely
# delete the VRF from the router config (handled by the driver)
if not self._router_ids_by_vrf.get(vrf_name):
LOG.debug("++ REMOVING VRF %s" % vrf_name)
driver._remove_vrf(ri)
del self._router_ids_by_vrf[vrf_name] | Remove router ID from a VRF list.
This removes a router from the list of routers that's kept
in a map, using a VRF ID as the key. If the VRF exists, the
router is removed from the list if it's present. If the last
router in the list is removed, then the driver's method to
remove the VRF is called and the map entry for that
VRF is deleted. | entailment |
def _internal_network_removed(self, ri, port, ex_gw_port):
"""Remove an internal router port
Check to see if this is the last port to be removed for
a given network scoped by a VRF (note: there can be
different mappings between VRFs and networks -- 1-to-1,
1-to-n, n-to-1, n-to-n -- depending on the configuration
and workflow used). If it is the last port, set the flag
indicating that the internal sub-interface for that netowrk
on the ASR should be deleted
"""
itfc_deleted = False
driver = self.driver_manager.get_driver(ri.id)
vrf_name = driver._get_vrf_name(ri)
network_name = ex_gw_port['hosting_info'].get('network_name')
if self._router_ids_by_vrf_and_ext_net.get(
vrf_name, {}).get(network_name) and (
ri.router['id'] in
self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):
# If this is the last port for this neutron router,
# then remove this router from the list
if len(ri.internal_ports) == 1 and port in ri.internal_ports:
self._router_ids_by_vrf_and_ext_net[
vrf_name][network_name].remove(ri.router['id'])
# Check if any other routers in this VRF have this network,
# and if not, set the flag to remove the interface
if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(
network_name):
LOG.debug("++ REMOVING NETWORK %s" % network_name)
itfc_deleted = True
del self._router_ids_by_vrf_and_ext_net[
vrf_name][network_name]
if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):
del self._router_ids_by_vrf_and_ext_net[vrf_name]
driver.internal_network_removed(ri, port,
itfc_deleted=itfc_deleted)
if ri.snat_enabled and ex_gw_port:
driver.disable_internal_network_NAT(ri, port, ex_gw_port,
itfc_deleted=itfc_deleted) | Remove an internal router port
Check to see if this is the last port to be removed for
a given network scoped by a VRF (note: there can be
different mappings between VRFs and networks -- 1-to-1,
1-to-n, n-to-1, n-to-n -- depending on the configuration
and workflow used). If it is the last port, set the flag
indicating that the internal sub-interface for that network
on the ASR should be deleted | entailment |
def do_list_organizations(self, line):
    """Print the organizations known to DCNM as a table."""
    orgs = self.dcnm_client.list_organizations()
    if not orgs:
        print('No organization found.')
        return
    table = PrettyTable(['Organization Name'])
    for entry in orgs:
        table.add_row([entry['organizationName']])
    print(table)
def create_routertype(self, context, routertype):
    """Create a router type bound to a hosting device template.

    :param context: neutron api request context
    :param routertype: dict whose 'routertype' entry describes the
        router type to create (name, template, scheduler, drivers, ...)
    :returns: dict representation of the created router type
    """
    LOG.debug("create_routertype() called. Contents %s", routertype)
    spec = routertype['routertype']
    with context.session.begin(subtransactions=True):
        rt_db = l3_models.RouterType(
            id=self._get_id(spec),
            tenant_id=spec['tenant_id'],
            name=spec['name'],
            description=spec['description'],
            template_id=spec['template_id'],
            ha_enabled_by_default=spec['ha_enabled_by_default'],
            shared=spec['shared'],
            slot_need=spec['slot_need'],
            scheduler=spec['scheduler'],
            driver=spec['driver'],
            cfg_agent_service_helper=spec['cfg_agent_service_helper'],
            cfg_agent_driver=spec['cfg_agent_driver'])
        context.session.add(rt_db)
    return self._make_routertype_dict(rt_db)
Also binds it to the specified hosting device template. | entailment |
def associate_hosting_device_with_config_agent(
        self, client, config_agent_id, body):
    """Associate a hosting device with the given config agent."""
    path = (ConfigAgentHandlingHostingDevice.resource_path +
            CFG_AGENT_HOSTING_DEVICES) % config_agent_id
    return client.post(path, body=body)
def disassociate_hosting_device_with_config_agent(
        self, client, config_agent_id, hosting_device_id):
    """Remove the association between a hosting device and a config agent."""
    path = (ConfigAgentHandlingHostingDevice.resource_path +
            CFG_AGENT_HOSTING_DEVICES + "/%s") % (config_agent_id,
                                                  hosting_device_id)
    return client.delete(path)
def list_hosting_device_handled_by_config_agent(
        self, client, cfg_agent_id, **_params):
    """Fetch the hosting devices handled by the given config agent."""
    path = (ConfigAgentHandlingHostingDevice.resource_path +
            CFG_AGENT_HOSTING_DEVICES) % cfg_agent_id
    return client.get(path, params=_params)
def list_config_agents_handling_hosting_device(
        self, client, hosting_device_id, **_params):
    """Fetch the config agents handling the given hosting device."""
    base = '/dev_mgr/hosting_devices/%s' + HOSTING_DEVICE_CFG_AGENTS
    return client.get(base % hosting_device_id, params=_params)
def _parse_nexus_vni_range(self, tunnel_range):
"""Raise an exception for invalid tunnel range or malformed range."""
for ident in tunnel_range:
if not self._is_valid_nexus_vni(ident):
raise exc.NetworkTunnelRangeError(
tunnel_range=tunnel_range,
error=_("%(id)s is not a valid Nexus VNI value.") %
{'id': ident})
if tunnel_range[1] < tunnel_range[0]:
raise exc.NetworkTunnelRangeError(
tunnel_range=tunnel_range,
error=_("End of tunnel range is less than start of "
"tunnel range.")) | Raise an exception for invalid tunnel range or malformed range. | entailment |
def sync_allocations(self):
    """
    Synchronize vxlan_allocations table with configured tunnel ranges.

    Rows for VNIs that are no longer configured (and not currently
    allocated) are deleted, and rows are inserted for newly configured
    VNIs.  Deletes and inserts are done in chunks of 100 so individual
    statements stay bounded in size.
    """
    # determine current configured allocatable vnis
    vxlan_vnis = set()
    for tun_min, tun_max in self.tunnel_ranges:
        vxlan_vnis |= set(six.moves.range(tun_min, tun_max + 1))
    session = bc.get_writer_session()
    with session.begin(subtransactions=True):
        # remove from table unallocated tunnels not currently allocatable
        # fetch results as list via all() because we'll be iterating
        # through them twice
        allocs = (session.query(nexus_models_v2.NexusVxlanAllocation).
                  with_lockmode("update").all())
        # collect all vnis present in db
        existing_vnis = set(alloc.vxlan_vni for alloc in allocs)
        # collect those vnis that needs to be deleted from db
        vnis_to_remove = [alloc.vxlan_vni for alloc in allocs
                          if (alloc.vxlan_vni not in vxlan_vnis and
                              not alloc.allocated)]
        # Immediately delete vnis in chunks. This leaves no work for
        # flush at the end of transaction
        bulk_size = 100
        chunked_vnis = (vnis_to_remove[i:i + bulk_size] for i in
                        range(0, len(vnis_to_remove), bulk_size))
        for vni_list in chunked_vnis:
            session.query(nexus_models_v2.NexusVxlanAllocation).filter(
                nexus_models_v2.NexusVxlanAllocation.
                vxlan_vni.in_(vni_list)).delete(
                synchronize_session=False)
        # collect vnis that need to be added
        vnis = list(vxlan_vnis - existing_vnis)
        chunked_vnis = (vnis[i:i + bulk_size] for i in
                        range(0, len(vnis), bulk_size))
        for vni_list in chunked_vnis:
            # Bulk-insert unallocated rows for the new VNIs.
            bulk = [{'vxlan_vni': vni, 'allocated': False}
                    for vni in vni_list]
            session.execute(nexus_models_v2.NexusVxlanAllocation.
                            __table__.insert(), bulk)
def _setup_notification_listener(self, topic_name, url):
    """Create the notification listener used by this service."""
    endpoints = rpc.DfaNotificationEndpoints(self)
    self.notify_listener = rpc.DfaNotifcationListener(
        topic_name, url, endpoints)
def callback(self, timestamp, event_type, payload):
    """Queue an incoming notification for later processing.

    The event is prioritized by its type (create/delete/update) and
    pushed onto the internal priority queue; failures are logged but
    never propagated to the notification transport.

    :param timestamp: time the message is received.
    :param event_type: event type in the notification queue such as
                       identity.project.created, identity.project.deleted.
    :param payload: Contains information of an event
    """
    try:
        LOG.debug('RX NOTIFICATION ==>\nevent_type: %(event)s, '
                  'payload: %(payload)s\n', (
                      {'event': event_type, 'payload': payload}))
        if 'create' in event_type:
            priority = self._create_pri
        elif 'delete' in event_type:
            priority = self._delete_pri
        elif 'update' in event_type:
            priority = self._update_pri
        else:
            # Unknown types fall back to delete priority.
            priority = self._delete_pri
        self._pq.put((priority, timestamp, (event_type, payload)))
    except Exception as exc:
        LOG.exception('Error: %(err)s for event %(event)s',
                      {'err': str(exc), 'event': event_type})
:param timestamp: time the message is received.
:param event_type: event type in the notification queue such as
identity.project.created, identity.project.deleted.
:param payload: Contains information of an event | entailment |
def event_handler(self):
    """Wait on the notification queue, listening for events."""
    if self._notify_queue:
        LOG.debug('calling event handler for %s', self)
        self.start()
        self.wait()
    else:
        LOG.error('event_handler: no notification queue for %s',
                  self._service_name)
def set_driver(self, resource):
    """Set the driver for a neutron resource.

    Drivers are cached per hosting device: the first resource seen for
    a hosting device instantiates the driver (temporarily swapping the
    obfuscated credentials for the real password so the driver can
    connect), later resources reuse the cached instance.

    :param resource: Neutron resource in dict format.
        Expected keys::

            {
                'id': <value>,
                'hosting_device': { 'id': <value>, },
                'router_type': {'cfg_agent_driver': <value>, }
            }
    :returns: driver object
    :raises DriverNotExist: if the driver class cannot be imported
    :raises DriverNotSetForMissingParameter: if a required key is absent
    """
    try:
        resource_id = resource['id']
        hosting_device = resource['hosting_device']
        hd_id = hosting_device['id']
        if hd_id in self._hosting_device_routing_drivers_binding:
            # Reuse the driver already built for this hosting device.
            driver = self._hosting_device_routing_drivers_binding[hd_id]
            self._drivers[resource_id] = driver
        else:
            driver_class = resource['router_type']['cfg_agent_driver']
            # save a copy of the obfuscated credentials
            # NOTE(review): dict(None) raises TypeError when the
            # 'credentials' key is absent/None, which neither handler
            # below catches — confirm credentials are always present.
            obfusc_creds = dict(hosting_device.get('credentials'))
            if obfusc_creds:
                # get un-obfuscated password
                real_pw = self._cfg_agent.get_hosting_device_password(
                    obfusc_creds.get('credentials_id'))
                hosting_device['credentials']['password'] = real_pw
            driver = importutils.import_object(driver_class,
                                               **hosting_device)
            self._hosting_device_routing_drivers_binding[hd_id] = driver
            # Restore the obfuscated credentials so the real password
            # does not linger in the resource dict.
            if obfusc_creds:
                hosting_device['credentials'] = obfusc_creds
            self._drivers[resource_id] = driver
        return driver
    except ImportError:
        # reraise=False: we raise our own exception type instead.
        with excutils.save_and_reraise_exception(reraise=False):
            LOG.exception("Error loading cfg agent driver %(driver)s "
                          "for hosting device template %(t_name)s"
                          "(%(t_id)s)",
                          {'driver': driver_class, 't_id': hd_id,
                           't_name': resource['name']})
            raise cfg_exceptions.DriverNotExist(driver=driver_class)
    except KeyError as e:
        with excutils.save_and_reraise_exception(reraise=False):
            raise cfg_exceptions.DriverNotSetForMissingParameter(e)
:param resource: Neutron resource in dict format.
Expected keys::
{
'id': <value>,
'hosting_device': { 'id': <value>, },
'router_type': {'cfg_agent_driver': <value>, }
}
:returns: driver object | entailment |
def _import_ucsmsdk(self):
    """Imports the ucsmsdk module.

    This module is not installed as part of the normal Neutron
    distributions. It is imported dynamically in this module so that
    the import can be mocked, allowing unit testing without requiring
    the installation of ucsmsdk.

    :returns: a facade class whose attributes are the dynamically
        imported ucsmsdk classes this driver uses
    """
    # Check if SSL certificate checking has been disabled.
    # If so, warn the user before proceeding.
    if not CONF.ml2_cisco_ucsm.ucsm_https_verify:
        LOG.warning(const.SSL_WARNING)
    # Monkey patch the ucsmsdk version of ssl to enable https_verify if
    # required
    from networking_cisco.ml2_drivers.ucsm import ucs_ssl
    ucs_driver = importutils.import_module('ucsmsdk.ucsdriver')
    ucs_driver.ssl = ucs_ssl

    # Thin facade over the SDK: one attribute per class actually used.
    class ucsmsdk(object):
        handle = importutils.import_class(
            'ucsmsdk.ucshandle.UcsHandle')
        fabricVlan = importutils.import_class(
            'ucsmsdk.mometa.fabric.FabricVlan.FabricVlan')
        vnicProfile = importutils.import_class(
            'ucsmsdk.mometa.vnic.VnicProfile.VnicProfile')
        vnicEtherIf = importutils.import_class(
            'ucsmsdk.mometa.vnic.VnicEtherIf.VnicEtherIf')
        vmVnicProfCl = importutils.import_class(
            'ucsmsdk.mometa.vm.VmVnicProfCl.VmVnicProfCl')

    return ucsmsdk
This module is not installed as part of the normal Neutron
distributions. It is imported dynamically in this module so that
the import can be mocked, allowing unit testing without requiring
the installation of ucsmsdk. | entailment |
def ucs_manager_connect(self, ucsm_ip):
    """Log in to the UCS Manager at ucsm_ip and return its handle.

    Returns None when no login credentials are configured for this
    UCSM; raises UcsmConnectFailed when the login itself fails.
    """
    if not self.ucsmsdk:
        self.ucsmsdk = self._import_ucsmsdk()
    creds = CONF.ml2_cisco_ucsm.ucsms.get(ucsm_ip)
    if not creds or not creds.ucsm_username or not creds.ucsm_password:
        LOG.error('UCS Manager network driver failed to get login '
                  'credentials for UCSM %s', ucsm_ip)
        return None
    handle = self.ucsmsdk.handle(ucsm_ip, creds.ucsm_username,
                                 creds.ucsm_password)
    try:
        handle.login()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConnectFailed(ucsm_ip=ucsm_ip, exc=e)
    return handle
def _create_ucsm_host_to_service_profile_mapping(self):
    """Build server-to-service-profile maps for host-list-less UCSMs.

    For every UCSM without an explicit host list configured, query its
    service profiles and record, for each profile bound to a physical
    server and not derived from a template, the profile DN and the
    server-to-UCSM association.
    """
    ucsms = CONF.ml2_cisco_ucsm.ucsms
    for ucsm_ip in [ip for ip, cfg in ucsms.items()
                    if not cfg.ucsm_host_list]:
        with self.ucsm_connect_disconnect(ucsm_ip) as handle:
            try:
                for sp in (handle.query_classid('lsServer') or []):
                    if not sp.pn_dn:
                        continue
                    server_name = handle.query_dn(sp.pn_dn).name
                    if server_name and not sp.oper_src_templ_name:
                        LOG.debug('Server %s info retrieved '
                                  'from UCSM %s', server_name, ucsm_ip)
                        self.ucsm_sp_dict[(ucsm_ip, server_name)] = (
                            str(sp.dn))
                        self.ucsm_host_dict[server_name] = ucsm_ip
            except Exception as e:
                # Raise a Neutron exception. Include a description of
                # the original exception.
                raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)
def _create_vlanprofile(self, handle, vlan_id, ucsm_ip):
    """Create a VLAN profile so it can be attached to a Port Profile.

    Returns True on success, False when the VLAN profile path is
    missing on the UCSM; other UCSM errors are delegated to
    _handle_ucsm_exception.
    """
    vlan_name = self.make_vlan_name(vlan_id)
    vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                         vlan_name)
    try:
        parent = handle.query_dn(const.VLAN_PATH)
        if not parent:
            LOG.warning('UCS Manager network driver Vlan Profile '
                        'path at %s missing', const.VLAN_PATH)
            return False
        # Create a vlan profile with the given vlan_id
        profile = self.ucsmsdk.fabricVlan(
            parent_mo_or_dn=parent,
            name=vlan_name,
            compression_type=const.VLAN_COMPRESSION_TYPE,
            sharing=const.NONE,
            pub_nw_name="",
            id=str(vlan_id),
            mcast_policy_name="",
            default_net="no")
        handle.add_mo(profile)
        handle.commit()
        if profile:
            LOG.debug('UCS Manager network driver Created Vlan '
                      'Profile %s at %s', vlan_name, vlan_profile_dest)
            return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'Vlan Profile',
                                           vlan_name, ucsm_ip)
def _create_port_profile(self, handle, profile_name, vlan_id,
                         vnic_type, ucsm_ip, trunk_vlans, qos_policy):
    """Creates a Port Profile on the UCS Manager.

    Significant parameters set in the port profile are:
    1. Port profile name - Should match what was set in vif_details
    2. High performance mode - For VM-FEX to be enabled/configured on
       the port using this port profile, this mode should be enabled.
    3. Vlan id - Vlan id used by traffic to and from the port.

    Also associates the profile with the given VLAN (plus any trunk
    VLANs) and creates the client profile beneath it.  Returns True on
    success, False on a detectable failure; other UCSM errors go
    through _handle_ucsm_exception.
    """
    port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                         profile_name)
    vlan_name = self.make_vlan_name(vlan_id)
    vlan_associate_path = (const.PORT_PROFILESETDN +
                           const.VNIC_PATH_PREFIX + profile_name +
                           const.VLAN_PATH_PREFIX + vlan_name)
    cl_profile_name = const.CLIENT_PROFILE_NAME_PREFIX + str(vlan_id)
    cl_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
                       profile_name + const.CLIENT_PROFILE_PATH_PREFIX +
                       cl_profile_name)
    # Remove this Port Profile from the delete DB table if it was
    # added there due to a previous delete.
    self.ucsm_db.remove_port_profile_to_delete(profile_name, ucsm_ip)
    # Check if direct or macvtap mode
    if vnic_type == bc.portbindings.VNIC_DIRECT:
        port_mode = const.HIGH_PERF
    else:
        port_mode = const.NONE
    try:
        port_profile = handle.query_dn(const.PORT_PROFILESETDN)
        if not port_profile:
            LOG.warning('UCS Manager network driver Port Profile '
                        'path at %s missing',
                        const.PORT_PROFILESETDN)
            return False
        # Create a port profile on the UCS Manager
        p_profile = self.ucsmsdk.vnicProfile(
            parent_mo_or_dn=port_profile,
            name=profile_name,
            policy_owner="local",
            nw_ctrl_policy_name="",
            pin_to_group_name="",
            descr=const.DESCR,
            qos_policy_name=qos_policy,
            host_nw_ioperf=port_mode,
            max_ports=const.MAX_PORTS)
        handle.add_mo(p_profile)
        if not p_profile:
            LOG.warning('UCS Manager network driver could not '
                        'create Port Profile %s.', profile_name)
            return False
        LOG.debug('UCS Manager network driver associating Vlan '
                  'Profile with Port Profile at %s',
                  vlan_associate_path)
        # Associate port profile with vlan profile
        mo = self.ucsmsdk.vnicEtherIf(
            parent_mo_or_dn=p_profile,
            name=vlan_name,
            default_net="yes")
        handle.add_mo(mo)
        if not mo:
            LOG.warning('UCS Manager network driver cannot '
                        'associate Vlan Profile to Port '
                        'Profile %s', profile_name)
            return False
        LOG.debug('UCS Manager network driver created Port Profile %s '
                  'at %s', profile_name, port_profile_dest)
        # For Multi VLAN trunk support
        if trunk_vlans:
            for vlan in trunk_vlans:
                vlan_name = self.make_vlan_name(vlan)
                # Associate port profile with vlan profile
                # for the trunk vlans (failure is logged, not fatal)
                mo = self.ucsmsdk.vnicEtherIf(
                    parent_mo_or_dn=p_profile,
                    name=vlan_name,
                    default_net="no")
                handle.add_mo(mo)
                if not mo:
                    LOG.warning('UCS Manager network driver cannot '
                                'associate Vlan %(vlan)d to Port '
                                'Profile %(profile)s',
                                {'vlan': vlan, 'profile': profile_name})
        # Client profile scoping the port profile to any org/switch/DC.
        cl_profile = self.ucsmsdk.vmVnicProfCl(
            parent_mo_or_dn=p_profile,
            org_path=".*",
            name=cl_profile_name,
            policy_owner="local",
            sw_name=".*",
            dc_name=".*",
            descr=const.DESCR)
        handle.add_mo(cl_profile)
        if not cl_profile:
            LOG.warning('UCS Manager network driver could not '
                        'create Client Profile %s.',
                        cl_profile_name)
            return False
        handle.commit()
        LOG.debug('UCS Manager network driver created Client Profile '
                  '%s at %s', cl_profile_name, cl_profile_dest)
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'Port Profile',
                                           profile_name, ucsm_ip)
Significant parameters set in the port profile are:
1. Port profile name - Should match what was set in vif_details
2. High performance mode - For VM-FEX to be enabled/configured on
the port using this port profile, this mode should be enabled.
3. Vlan id - Vlan id used by traffic to and from the port. | entailment |
def _update_service_profile(self, handle, service_profile,
                            vlan_id, ucsm_ip):
    """Updates Service Profile on the UCS Manager.

    Each of the ethernet ports on the Service Profile representing
    the UCS Server is updated with the VLAN profile corresponding
    to the vlan_id passed in.  Returns True on success, False on a
    detectable failure; other UCSM errors are delegated to
    _handle_ucsm_exception.
    """
    ucsm_cfg = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
    eth_port_paths = ["%s%s" % (service_profile, ep)
                      for ep in ucsm_cfg.ucsm_virtio_eth_ports]
    vlan_name = self.make_vlan_name(vlan_id)
    try:
        if not handle.query_dn(service_profile):
            LOG.debug('UCS Manager network driver could not find '
                      'Service Profile %s in UCSM %s',
                      service_profile, ucsm_ip)
            return False
        for eth_port_path in eth_port_paths:
            eth = handle.query_dn(eth_port_path)
            if not eth:
                LOG.debug('UCS Manager network driver did not find '
                          'ethernet port at %s', eth_port_path)
                continue
            eth_if = self.ucsmsdk.vnicEtherIf(
                parent_mo_or_dn=eth,
                name=vlan_name,
                default_net="no")
            handle.add_mo(eth_if)
            if not eth_if:
                LOG.debug('UCS Manager network driver could not '
                          'update Service Profile %s with vlan %d',
                          service_profile, vlan_id)
                return False
        handle.commit()
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'Service Profile',
                                           vlan_name, ucsm_ip)
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in. | entailment |
def update_vnic_template(self, host_id, vlan_id, physnet,
                         vnic_template_path, vnic_template):
    """Updates VNIC Template with the vlan_id.

    Looks up the UCSM serving host_id, ensures a VLAN profile for
    vlan_id exists, then adds the VLAN to the named VNIC template.
    Returns True on success, False on a detectable failure; other
    UCSM errors are routed through _handle_ucsm_exception.
    """
    ucsm_ip = self.get_ucsm_ip_for_host(host_id)
    if not ucsm_ip:
        LOG.info('UCS Manager network driver does not have UCSM IP '
                 'for Host_id %s', str(host_id))
        return False
    vlan_name = self.make_vlan_name(vlan_id)
    with self.ucsm_connect_disconnect(ucsm_ip) as handle:
        # Create Vlan Profile
        if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
            LOG.error('UCS Manager network driver failed to create '
                      'Vlan Profile for vlan %s', vlan_id)
            return False
        try:
            LOG.debug('VNIC Template Path: %s', vnic_template_path)
            vnic_template_full_path = (vnic_template_path +
                const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
            LOG.debug('VNIC Template Path: %s for physnet %s',
                      vnic_template_full_path, physnet)
            mo = handle.query_dn(vnic_template_full_path)
            if not mo:
                LOG.error('UCS Manager network driver could '
                          'not find VNIC template %s',
                          vnic_template_full_path)
                return False
            vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
                       vlan_name)
            LOG.debug('VNIC Template VLAN path: %s', vlan_dn)
            # Add the VLAN interface to the template.
            eth_if = self.ucsmsdk.vnicEtherIf(
                parent_mo_or_dn=mo,
                name=vlan_name,
                default_net="no")
            handle.add_mo(eth_if)
            if not eth_if:
                LOG.error('UCS Manager network driver could '
                          'not add VLAN %(vlan_name)s to VNIC '
                          'template %(vnic_template_full_path)s',
                          {'vlan_name': vlan_name,
                           'vnic_template_full_path':
                               vnic_template_full_path})
                return False
            handle.commit()
            return True
        except Exception as e:
            return self._handle_ucsm_exception(e, 'VNIC Template',
                                               vlan_id, ucsm_ip)
def _delete_vlan_profile(self, handle, vlan_id, ucsm_ip):
    """Remove the VLAN profile for vlan_id from the UCS Manager."""
    vlan_profile_dest = (const.VLAN_PATH + const.VLAN_PROFILE_PATH_PREFIX +
                         self.make_vlan_name(vlan_id))
    try:
        mo = handle.query_dn(vlan_profile_dest)
        if mo:
            handle.remove_mo(mo)
            handle.commit()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigFailed(config=vlan_id,
                                    ucsm_ip=ucsm_ip, exc=e)
def _delete_port_profile_from_ucsm(self, handle, port_profile, ucsm_ip):
    """Delete the named Port Profile from the UCS Manager."""
    dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
            port_profile)
    # Find port profile on the UCS Manager
    mo = handle.query_dn(dest)
    if not mo:
        LOG.warning('UCS Manager network driver did not find '
                    'Port Profile %s to delete.',
                    port_profile)
    else:
        handle.remove_mo(mo)
    handle.commit()
def _remove_vlan_from_all_service_profiles(self, handle, vlan_id, ucsm_ip):
    """Strip a VLAN's config from every Service Profile on this UCSM.

    Walks all service profiles recorded for ucsm_ip and removes the
    VLAN interface from each configured virtio ethernet port on which
    it is present.  Raises UcsmConfigDeleteFailed on any UCSM error.
    """
    profiles = [sp for key, sp in six.iteritems(self.ucsm_sp_dict)
                if ucsm_ip in key and sp]
    if not profiles:
        # Nothing to do
        return
    try:
        for service_profile in profiles:
            virtio_ports = (
                CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
            # 1. From the Service Profile config, access the
            #    configuration for its ports.
            # 2. Check if that Vlan has been configured on each port
            # 3. If Vlan config found, remove it.
            if not handle.query_dn(service_profile):
                continue
            for ep in virtio_ports:
                eth_port_path = "%s%s" % (service_profile, ep)
                if not handle.query_dn(eth_port_path):
                    continue
                vlan_name = self.make_vlan_name(vlan_id)
                vlan = handle.query_dn(
                    eth_port_path + "/if-" + vlan_name)
                if vlan:
                    # Found vlan config. Now remove it.
                    handle.remove_mo(vlan)
                    handle.commit()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN config from all SP Templates that have it.

    For each configured Service Profile template on this UCSM the VLAN
    interface is removed from every configured virtio ethernet port on
    which it is present.  Returns True on success; raises
    UcsmConfigDeleteFailed on any UCSM error.
    """
    sp_template_info_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
    vlan_name = self.make_vlan_name(vlan_id)
    virtio_port_list = (
        CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
    try:
        # sp_template_info_list is a list of tuples.
        # Each tuple is of the form :
        # (ucsm_ip, sp_template_path, sp_template)
        for sp_template_info in sp_template_info_list:
            sp_template_path = sp_template_info.path
            sp_template = sp_template_info.name
            sp_template_full_path = (sp_template_path +
                const.SP_TEMPLATE_PREFIX + sp_template)
            obj = handle.query_dn(sp_template_full_path)
            if not obj:
                LOG.error('UCS Manager network driver could not '
                          'find Service Profile template %s',
                          sp_template_full_path)
                continue
            eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
                              for ep in virtio_port_list]
            for eth_port_path in eth_port_paths:
                eth = handle.query_dn(eth_port_path)
                if eth:
                    vlan_path = (eth_port_path +
                                 const.VLAN_PATH_PREFIX + vlan_name)
                    vlan = handle.query_dn(vlan_path)
                    if vlan:
                        # Found vlan config. Now remove it.
                        handle.remove_mo(vlan)
                    else:
                        LOG.debug('UCS Manager network driver did not '
                                  'find VLAN %s at %s', vlan_name, eth_port_path)
                else:
                    LOG.debug('UCS Manager network driver did not '
                              'find ethernet port at %s', eth_port_path)
        handle.commit()
        return True
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
def _remove_vlan_from_vnic_templates(self, handle, vlan_id, ucsm_ip):
    """Removes VLAN from all VNIC templates that have it enabled.

    Returns True on success (or None when there are no templates);
    UCSM errors are routed through _handle_ucsm_exception.
    """
    ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
    vnic_template_info = ucsm.vnic_template_list.values()
    vlan_name = self.make_vlan_name(vlan_id)
    if not vnic_template_info:
        # Nothing to do
        return
    try:
        for temp_info in vnic_template_info:
            vnic_template = temp_info.template
            vnic_template_path = temp_info.path
            vnic_template_full_path = (vnic_template_path +
                const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
            LOG.debug('vnic_template_full_path: %s',
                      vnic_template_full_path)
            mo = handle.query_dn(vnic_template_full_path)
            if not mo:
                LOG.error('UCS Manager network driver could '
                          'not find VNIC template %s at',
                          vnic_template_full_path)
                continue
            vlan_dn = (vnic_template_full_path +
                       const.VLAN_PATH_PREFIX + vlan_name)
            LOG.debug('VNIC Template VLAN path; %s', vlan_dn)
            eth_if = handle.query_dn(vlan_dn)
            if not eth_if:
                # NOTE(review): this only logs; the second 'if eth_if'
                # below then skips the removal, so the two checks are
                # redundant but not harmful.
                LOG.error('UCS Manager network driver could not '
                          'delete VLAN %(vlan_name)s from VNIC '
                          'template %(vnic_template_full_path)s',
                          {'vlan_name': vlan_name,
                           'vnic_template_full_path':
                               vnic_template_full_path})
            if eth_if:
                handle.remove_mo(eth_if)
                handle.commit()
        return True
    except Exception as e:
        return self._handle_ucsm_exception(e, 'VNIC Template',
                                           vlan_id, ucsm_ip)
def nova_services_up(self):
    """Checks if required Nova services are up and running.

    returns: True if all needed Nova services are up, False otherwise
    """
    required = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
                    'nova-compute'])
    try:
        services = self._nclient.services.list()
    except Exception as e:
        # There are several individual Nova client exceptions but they
        # have no other common base than Exception, hence the broad
        # catch.
        LOG.error('Failure determining running Nova services: %s', e)
        return False
    running = set(svc.binary for svc in services
                  if svc.status == 'enabled' and svc.state == 'up')
    return required.issubset(running)
returns: True if all needed Nova services are up, False otherwise | entailment |
def _get_unscheduled_routers(self, plugin, context):
    """Return namespace-type routers that have no L3 agent binding."""
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        # Releases up to Newton pass the first two arguments swapped.
        context, plugin = plugin, context
    # TODO(gongysh) consider the disabled agent's router
    no_agent_binding = ~sql.exists().where(
        bc.Router.id == bc.rb_model.RouterL3AgentBinding.router_id)
    # Modified to only include routers of network namespace type
    ns_routertype_id = plugin.get_namespace_router_type_id(context)
    query = (context.session.query(bc.Router.id)
             .join(l3_models.RouterHostingDeviceBinding)
             .filter(l3_models.RouterHostingDeviceBinding.router_type_id ==
                     ns_routertype_id, no_agent_binding))
    unscheduled_ids = [row[0] for row in query]
    if not unscheduled_ids:
        return []
    return plugin.get_routers(context, filters={'id': unscheduled_ids})
def _filter_unscheduled_routers(self, plugin, context, routers):
    """Return the namespace-type routers not hosted by any L3 agent.

    Only for release < pike.
    """
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        # Releases up to Newton pass the first two arguments swapped.
        context, plugin = plugin, context
    unscheduled = []
    for router in routers:
        if (router[routertype.TYPE_ATTR] !=
                plugin.get_namespace_router_type_id(context)):
            # ignore non-namespace routers
            continue
        hosting_agents = plugin.get_l3_agents_hosting_routers(
            context, [router['id']])
        if not hosting_agents:
            unscheduled.append(router)
        else:
            LOG.debug('Router %(router_id)s has already been '
                      'hosted by L3 agent %(agent_id)s',
                      {'router_id': router['id'],
                       'agent_id': hosting_agents[0]['id']})
    return unscheduled
Only for release < pike. | entailment |
def _get_underscheduled_routers(self, plugin, context):
    """Return namespace routers scheduled to too few L3 agents.

    For release >= pike.
    """
    result = []
    max_agents_for_ha = plugin.get_number_of_agents_for_scheduling(
        context)
    for router, count in plugin.get_routers_l3_agents_count(context):
        if (router[routertype.TYPE_ATTR] !=
                plugin.get_namespace_router_type_id(context)):
            # ignore non-namespace routers
            continue
        # Either the router was un-scheduled (scheduled to 0 agents),
        # or it's an HA router and it was under-scheduled (scheduled to
        # less than max_agents_for_ha). Either way, it should be added
        # to the list of routers we want to handle.
        if (count < 1 or
                (router.get('ha', False) and count < max_agents_for_ha)):
            result.append(router)
    return result
def auto_schedule_hosting_devices(self, plugin, context, agent_host):
    """Schedules unassociated hosting devices to Cisco cfg agent.

    Schedules hosting devices to agent running on <agent_host>.
    Returns the agent DB record when a live, enabled Cisco cfg agent
    runs on that host, otherwise None.
    """
    query = context.session.query(bc.Agent).filter_by(
        agent_type=c_constants.AGENT_TYPE_CFG,
        host=agent_host, admin_state_up=True)
    try:
        cfg_agent_db = query.one()
    except (exc.MultipleResultsFound, exc.NoResultFound):
        LOG.debug('No enabled Cisco cfg agent on host %s', agent_host)
        return
    agent_dead = cfg_agentschedulers_db.CfgAgentSchedulerDbMixin.\
        is_agent_down(cfg_agent_db.heartbeat_timestamp)
    if agent_dead:
        LOG.warning('Cisco cfg agent %s is not alive',
                    cfg_agent_db.id)
    return cfg_agent_db
Schedules hosting devices to agent running on <agent_host>. | entailment |
def schedule_hosting_device(self, plugin, context, hosting_device):
    """Selects Cisco cfg agent that will configure <hosting_device>.

    :returns: a randomly chosen active cfg agent, or None when no
        active agent exists (the device will be picked up once an
        agent announces itself).
    """
    active_cfg_agents = plugin.get_cfg_agents(context, active=True)
    if not active_cfg_agents:
        LOG.warning('There are no active Cisco cfg agents')
        # No worries, once a Cisco cfg agent is started and
        # announces itself any "dangling" hosting devices
        # will be scheduled to it.
        return
    # Use lazy interpolation (args, not '%') so the message is only
    # formatted when debug logging is actually enabled.
    LOG.debug('Randomly selecting a Cisco cfg agent among %d candidates',
              len(active_cfg_agents))
    return random.choice(active_cfg_agents)
def _get_external_network_dict(self, context, port_db):
"""Get external network information
Get the information about the external network,
so that it can be used to create the hidden port,
subnet, and network.
"""
if port_db.device_owner == DEVICE_OWNER_ROUTER_GW:
network = self._core_plugin.get_network(context,
port_db.network_id)
else:
router = self.l3_plugin.get_router(context,
port_db.device_id)
ext_gw_info = router.get(EXTERNAL_GW_INFO)
if not ext_gw_info:
return {}, None
network = self._core_plugin.get_network(context,
ext_gw_info['network_id'])
# network names in GBP workflow need to be reduced, since
# the network may contain UUIDs
external_network = self.get_ext_net_name(network['name'])
# TODO(tbachman): see if we can get rid of the default
transit_net = self.transit_nets_cfg.get(
external_network) or self._default_ext_dict
transit_net['network_name'] = external_network
return transit_net, network | Get external network information
Get the information about the external network,
so that it can be used to create the hidden port,
subnet, and network. | entailment |
def apic_driver(self):
"""Get APIC driver
There are different drivers for the GBP workflow
and Neutron workflow for APIC. First see if the GBP
workflow is active, and if so get the APIC driver for it.
If the GBP service isn't installed, try to get the driver
from the Neutron (APIC ML2) workflow.
"""
if not self._apic_driver:
try:
self._apic_driver = (bc.get_plugin(
'GROUP_POLICY').policy_driver_manager.
policy_drivers['apic'].obj)
self._get_ext_net_name = self._get_ext_net_name_gbp
self._get_vrf_context = self._get_vrf_context_gbp
except AttributeError:
LOG.info("GBP service plugin not present -- will "
"try APIC ML2 plugin.")
if not self._apic_driver:
try:
self._apic_driver = (
self._core_plugin.mechanism_manager.mech_drivers[
'cisco_apic_ml2'].obj)
self._get_ext_net_name = self._get_ext_net_name_neutron
self._get_vrf_context = self._get_vrf_context_neutron
except KeyError:
LOG.error("APIC ML2 plugin not present: "
"no APIC ML2 driver could be found.")
raise AciDriverNoAciDriverInstalledOrConfigured()
return self._apic_driver | Get APIC driver
There are different drivers for the GBP workflow
and Neutron workflow for APIC. First see if the GBP
workflow is active, and if so get the APIC driver for it.
If the GBP service isn't installed, try to get the driver
from the Neutron (APIC ML2) workflow. | entailment |
def _snat_subnet_for_ext_net(self, context, subnet, net):
"""Determine if an SNAT subnet is for this external network.
This method determines if a given SNAT subnet is intended for
the passed external network.
For APIC ML2/Neutron workflow, SNAT subnets are created on
a separate network from the external network. The association
with an external network is made by putting the name of the
external network in the name of the SNAT network name, using
a well-known prefix.
"""
if subnet['network_id'] == net['id']:
return True
network = self._core_plugin.get_network(
context.elevated(), subnet['network_id'])
ext_net_name = network['name']
if (APIC_SNAT_NET + '-') in ext_net_name:
# This is APIC ML2 mode -- we need to strip the prefix
ext_net_name = ext_net_name[len(APIC_SNAT_NET + '-'):]
if net['id'] == ext_net_name:
return True
return False | Determine if an SNAT subnet is for this external network.
This method determines if a given SNAT subnet is intended for
the passed external network.
For APIC ML2/Neutron workflow, SNAT subnets are created on
a separate network from the external network. The association
with an external network is made by putting the name of the
external network in the name of the SNAT network name, using
a well-known prefix. | entailment |
def extend_hosting_port_info(self, context, port_db, hosting_device,
hosting_info):
"""Get the segmenetation ID and interface
This extends the hosting info attribute with the segmentation ID
and physical interface used on the external router to connect to
the ACI fabric. The segmentation ID should have been set already
by the call to allocate_hosting_port, but if it's not present, use
the value from the port resource.
"""
if hosting_info.get('segmentation_id') is None:
LOG.debug('No segmentation ID in hosting_info -- assigning')
hosting_info['segmentation_id'] = (
port_db.hosting_info.get('segmentation_id'))
is_external = (port_db.device_owner == DEVICE_OWNER_ROUTER_GW)
hosting_info['physical_interface'] = self._get_interface_info(
hosting_device['id'], port_db.network_id, is_external)
ext_dict, net = self._get_external_network_dict(context, port_db)
if is_external and ext_dict:
hosting_info['network_name'] = ext_dict['network_name']
hosting_info['cidr_exposed'] = ext_dict['cidr_exposed']
hosting_info['gateway_ip'] = ext_dict['gateway_ip']
details = self.get_vrf_context(context,
port_db['device_id'], port_db)
router_id = port_db.device_id
router = self.l3_plugin.get_router(context, router_id)
# skip routers not created by the user -- they will have
# empty-string tenant IDs
if router.get(ROUTER_ROLE_ATTR):
return
hosting_info['vrf_id'] = details['vrf_id']
if ext_dict.get('global_config'):
hosting_info['global_config'] = (
ext_dict['global_config'])
self._add_snat_info(context, router, net, hosting_info)
else:
if ext_dict.get('interface_config'):
hosting_info['interface_config'] = ext_dict['interface_config'] | Get the segmenetation ID and interface
This extends the hosting info attribute with the segmentation ID
and physical interface used on the external router to connect to
the ACI fabric. The segmentation ID should have been set already
by the call to allocate_hosting_port, but if it's not present, use
the value from the port resource. | entailment |
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
"""Get the VLAN and port for this hosting device
The VLAN used between the APIC and the external router is stored
by the APIC driver. This calls into the APIC driver to first get
the ACI VRF information associated with this port, then uses that
to look up the VLAN to use for this port to the external router
(kept as part of the L3 Out policy in ACI).
"""
# If this is a router interface, the VLAN comes from APIC.
# If it's the gateway, the VLAN comes from the segment ID
if port_db.get('device_owner') == DEVICE_OWNER_ROUTER_GW:
ext_dict, net = self._get_external_network_dict(context, port_db)
# If an OpFlex network is used on the external network,
# the actual segment ID comes from the config file
if net and net.get('provider:network_type') == 'opflex':
if ext_dict.get('segmentation_id'):
return {'allocated_port_id': port_db.id,
'allocated_vlan': ext_dict['segmentation_id']}
else:
raise AciDriverConfigMissingSegmentationId(ext_net=net)
return super(AciVLANTrunkingPlugDriver,
self).allocate_hosting_port(
context, router_id,
port_db, network_type, hosting_device_id)
# shouldn't happen, but just in case
if port_db.get('device_owner') != DEVICE_OWNER_ROUTER_INTF:
return
# get the external network that this port connects to.
# if there isn't an external gateway yet on the router,
# then don't allocate a port
router = self.l3_plugin.get_router(context, router_id)
gw_info = router[EXTERNAL_GW_INFO]
if not gw_info:
return
network_id = gw_info.get('network_id')
networks = self._core_plugin.get_networks(
context.elevated(), {'id': [network_id]})
l3out_network = networks[0]
l3out_name = self.get_ext_net_name(l3out_network['name'])
# For VLAN apic driver provides VLAN tag
details = self.get_vrf_context(context, router_id, port_db)
if details is None:
LOG.debug('aci_vlan_trunking_driver: No vrf_details')
return
vrf_name = details.get('vrf_name')
vrf_tenant = details.get('vrf_tenant')
allocated_vlan = self.apic_driver.l3out_vlan_alloc.get_vlan_allocated(
l3out_name, vrf_name, vrf_tenant=vrf_tenant)
if allocated_vlan is None:
if not vrf_tenant:
# TODO(tbachman): I can't remember why this is here
return super(AciVLANTrunkingPlugDriver,
self).allocate_hosting_port(
context, router_id,
port_db, network_type, hosting_device_id
)
# Database must have been messed up if this happens ...
return
return {'allocated_port_id': port_db.id,
'allocated_vlan': allocated_vlan} | Get the VLAN and port for this hosting device
The VLAN used between the APIC and the external router is stored
by the APIC driver. This calls into the APIC driver to first get
the ACI VRF information associated with this port, then uses that
to look up the VLAN to use for this port to the external router
(kept as part of the L3 Out policy in ACI). | entailment |
def _get_ext_net_name_gbp(self, network_name):
"""Get the external network name
The name of the external network used in the APIC
configuration file can be different from the name
of the external network in Neutron, especially using
the GBP workflow
"""
prefix = network_name[:re.search(UUID_REGEX, network_name).start() - 1]
return prefix.strip(APIC_OWNED) | Get the external network name
The name of the external network used in the APIC
configuration file can be different from the name
of the external network in Neutron, especially using
the GBP workflow | entailment |
def is_valid_mac(addr):
"""Check the syntax of a given mac address.
The acceptable format is xx:xx:xx:xx:xx:xx
"""
addrs = addr.split(':')
if len(addrs) != 6:
return False
for m in addrs:
try:
if int(m, 16) > 255:
return False
except ValueError:
return False
return True | Check the syntax of a given mac address.
The acceptable format is xx:xx:xx:xx:xx:xx | entailment |
def make_cidr(gw, mask):
"""Create network address in CIDR format.
Return network address for a given gateway address and netmask.
"""
try:
int_mask = (0xFFFFFFFF << (32 - int(mask))) & 0xFFFFFFFF
gw_addr_int = struct.unpack('>L', socket.inet_aton(gw))[0] & int_mask
return (socket.inet_ntoa(struct.pack("!I", gw_addr_int)) +
'/' + str(mask))
except (socket.error, struct.error, ValueError, TypeError):
return | Create network address in CIDR format.
Return network address for a given gateway address and netmask. | entailment |
def find_agent_host_id(this_host):
"""Returns the neutron agent host id for RHEL-OSP6 HA setup."""
host_id = this_host
try:
for root, dirs, files in os.walk('/run/resource-agents'):
for fi in files:
if 'neutron-scale-' in fi:
host_id = 'neutron-n-' + fi.split('-')[2]
break
return host_id
except IndexError:
return host_id | Returns the neutron agent host id for RHEL-OSP6 HA setup. | entailment |
def _build_credentials(self, nexus_switches):
"""Build credential table for Rest API Client.
:param nexus_switches: switch config
:returns credentials: switch credentials list
"""
credentials = {}
for switch_ip, attrs in nexus_switches.items():
credentials[switch_ip] = (
attrs[const.USERNAME], attrs[const.PASSWORD],
attrs[const.HTTPS_VERIFY], attrs[const.HTTPS_CERT],
None)
if not attrs[const.HTTPS_VERIFY]:
LOG.warning("HTTPS Certificate verification is "
"disabled. Your connection to Nexus "
"Switch %(ip)s is insecure.",
{'ip': switch_ip})
return credentials | Build credential table for Rest API Client.
:param nexus_switches: switch config
:returns credentials: switch credentials list | entailment |
def capture_and_print_timeshot(self, start_time, which,
other=99, switch="x.x.x.x"):
"""Determine delta, keep track, and print results."""
curr_timeout = time.time() - start_time
if which in self.time_stats:
self.time_stats[which]["total_time"] += curr_timeout
self.time_stats[which]["total_count"] += 1
if (curr_timeout < self.time_stats[which]["min"]):
self.time_stats[which]["min"] = curr_timeout
if (curr_timeout > self.time_stats[which]["max"]):
self.time_stats[which]["max"] = curr_timeout
else:
self.time_stats[which] = {
"total_time": curr_timeout,
"total_count": 1,
"min": curr_timeout,
"max": curr_timeout}
LOG.debug("NEXUS_TIME_STATS %(switch)s, pid %(pid)d, tid %(tid)d: "
"%(which)s_timeout %(curr)f count %(count)d "
"average %(ave)f other %(other)d min %(min)f max %(max)f",
{'switch': switch,
'pid': os.getpid(),
'tid': threading.current_thread().ident,
'which': which,
'curr': curr_timeout,
'count': self.time_stats[which]["total_count"],
'ave': (self.time_stats[which]["total_time"] /
self.time_stats[which]["total_count"]),
'other': other,
'min': self.time_stats[which]["min"],
'max': self.time_stats[which]["max"]}) | Determine delta, keep track, and print results. | entailment |
def get_interface_switch(self, nexus_host,
intf_type, interface):
"""Get the interface data from host.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns response: Returns interface data
"""
if intf_type == "ethernet":
path_interface = "phys-[eth" + interface + "]"
else:
path_interface = "aggr-[po" + interface + "]"
action = snipp.PATH_IF % path_interface
starttime = time.time()
response = self.client.rest_get(action, nexus_host)
self.capture_and_print_timeshot(starttime, "getif",
switch=nexus_host)
LOG.debug("GET call returned interface %(if_type)s %(interface)s "
"config", {'if_type': intf_type, 'interface': interface})
return response | Get the interface data from host.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns response: Returns interface data | entailment |
def _get_interface_switch_trunk_present(
self, nexus_host, intf_type, interface):
"""Check if 'switchport trunk' configs present.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns mode_found: True if 'trunk mode' present
:returns vlan_configured: True if trunk allowed vlan list present
"""
result = self.get_interface_switch(nexus_host, intf_type, interface)
if_type = 'l1PhysIf' if intf_type == "ethernet" else 'pcAggrIf'
if_info = result['imdata'][0][if_type]
try:
mode_cfg = if_info['attributes']['mode']
except Exception:
mode_cfg = None
mode_found = (mode_cfg == "trunk")
try:
vlan_list = if_info['attributes']['trunkVlans']
except Exception:
vlan_list = None
vlan_configured = (vlan_list != const.UNCONFIGURED_VLAN)
return mode_found, vlan_configured | Check if 'switchport trunk' configs present.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns mode_found: True if 'trunk mode' present
:returns vlan_configured: True if trunk allowed vlan list present | entailment |
def add_ch_grp_to_interface(
self, nexus_host, if_type, port, ch_grp):
"""Applies channel-group n to ethernet interface."""
if if_type != "ethernet":
LOG.error("Unexpected interface type %(iftype)s when "
"adding change group", {'iftype': if_type})
return
starttime = time.time()
path_snip = snipp.PATH_ALL
path_interface = "phys-[eth" + port + "]"
body_snip = snipp.BODY_ADD_CH_GRP % (ch_grp, ch_grp, path_interface)
self.send_edit_string(nexus_host, path_snip, body_snip)
self.capture_and_print_timeshot(
starttime, "add_ch_group",
switch=nexus_host) | Applies channel-group n to ethernet interface. | entailment |
def _apply_user_port_channel_config(self, nexus_host, vpc_nbr):
"""Adds STP and no lacp suspend config to port channel. """
cli_cmds = self._get_user_port_channel_config(nexus_host, vpc_nbr)
if cli_cmds:
self._send_cli_conf_string(nexus_host, cli_cmds)
else:
vpc_str = str(vpc_nbr)
path_snip = snipp.PATH_ALL
body_snip = snipp.BODY_ADD_PORT_CH_P2 % (vpc_str, vpc_str)
self.send_edit_string(nexus_host, path_snip, body_snip) | Adds STP and no lacp suspend config to port channel. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.