sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst):
    """Delete the openstack router and remove the interfaces attached.

    :param name: router name, used only in the error log
    :param tenant_id: tenant owning the router (not used here)
    :param rout_id: ID of the router whose interfaces are removed
    :param subnet_lst: iterable of subnet IDs attached to the router
    :returns: True on success, False if any interface removal failed
    """
    try:
        for subnet_id in subnet_lst:
            body = {'subnet_id': subnet_id}
            # The returned interface dict is not needed; the original
            # code called intf.get('id') and discarded the result.
            self.neutronclient.remove_interface_router(rout_id,
                                                       body=body)
    except Exception as exc:
        LOG.error("Failed to delete router interface %(name)s, "
                  " Exc %(exc)s", {'name': name, 'exc': str(exc)})
        return False
    return True
def delete_router_by_name(self, rtr_name, tenant_id):
    """Delete the openstack router and its interfaces given its name.

    The interfaces should be already removed prior to calling this
    function. Every router whose name matches is deleted.
    """
    try:
        all_routers = self.neutronclient.list_routers().get('routers')
        matching = [rtr for rtr in all_routers if rtr['name'] == rtr_name]
        for rtr in matching:
            self.neutronclient.delete_router(rtr['id'])
    except Exception as exc:
        LOG.error("Failed to get and delete router by name %(name)s, "
                  "Exc %(exc)s",
                  {'name': rtr_name, 'exc': str(exc)})
        return False
    return True
def get_router_intf(self, router_id):
    """Retrieve the router interfaces. Incomplete, TODO(padkrish).

    Currently this only issues show_router and discards the result;
    interface retrieval is not implemented yet.
    """
    try:
        body = {}
        self.neutronclient.show_router(router_id, body=body)
    except Exception as exc:
        LOG.error("Failed to show router interface %(id)s "
                  "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
        return
def get_rtr_name(self, router_id):
    """Retrieve the router name. Incomplete.

    :param router_id: ID of the router to look up
    :returns: the router's name, or None when the lookup fails
    """
    try:
        body = {}
        router = self.neutronclient.show_router(router_id, body=body)
        return router.get('router').get('name')
    except Exception as exc:
        # Fixed log text: this call shows the router itself, not a
        # router interface as the previous message claimed.
        LOG.error("Failed to show router %(id)s "
                  "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
        return None
def find_rtr_namespace(self, rout_id):
    """Find the namespace associated with the router."""
    if rout_id is None:
        return None
    cmd = ['ip', 'netns', 'list']
    try:
        netns_output = utils.execute(cmd, root_helper=self.root_helper)
    except Exception as exc:
        LOG.error("Unable to find the namespace list Exception %s",
                  exc)
        return None
    # Pick the first namespace that mentions both 'router' and the id.
    candidates = (ns for ns in netns_output.split()
                  if 'router' in ns and rout_id in ns)
    return next(candidates, None)
def program_rtr(self, args, rout_id, namespace=None):
    """Execute the command against the namespace."""
    if namespace is None:
        namespace = self.find_rtr_namespace(rout_id)
        if namespace is None:
            LOG.error("Unable to find namespace for router %s", rout_id)
            return False
    cmd = ['ip', 'netns', 'exec', namespace] + args
    try:
        utils.execute(cmd, root_helper=self.root_helper)
    except Exception as exc:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': cmd, 'exception': exc})
        return False
    return True
def program_rtr_default_gw(self, tenant_id, rout_id, gw):
    """Program the default gateway of a router."""
    cmd = ['route', 'add', 'default', 'gw', gw]
    if not self.program_rtr(cmd, rout_id):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False):
    """Retrieve the subnets of a network.

    Get the subnets inside a network after applying the exclusion
    list.

    :param tenant_id: tenant whose networks are examined
    :param excl_list: network addresses (without mask) to exclude
    :param excl_part: when True, skip networks whose name carries a
        '::' partition suffix -- presumably partition-specific
        networks; TODO confirm the naming convention
    :returns: list of 'cidr' strings (address/mask) not excluded
    """
    net_list = self.get_network_by_tenant(tenant_id)
    ret_subnet_list = []
    for net in net_list:
        if excl_part:
            name = net.get('name')
            # Anything after '::' in the name marks a partition.
            part = name.partition('::')[2]
            if part:
                continue
        subnet_lst = self.get_subnets_for_net(net.get('id'))
        for subnet_elem in subnet_lst:
            # Compare only the bare network address against the
            # exclusion list, but return the full CIDR.
            subnet = subnet_elem.get('cidr').split('/')[0]
            subnet_and_mask = subnet_elem.get('cidr')
            if subnet not in excl_list:
                ret_subnet_list.append(subnet_and_mask)
    return ret_subnet_list
def program_rtr_all_nwk_next_hop(self, tenant_id, rout_id, next_hop,
                                 excl_list):
    """Program the next hop for all networks of a tenant.

    For every subnet of every network owned by the tenant (except
    those whose bare network address is in excl_list), add a route
    via next_hop inside the router's namespace.

    :returns: False as soon as any route programming fails, else True
    """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    net_list = self.get_network_by_tenant(tenant_id)
    for net in net_list:
        subnet_lst = self.get_subnets_for_net(net.get('id'))
        for subnet_elem in subnet_lst:
            # excl_list holds bare network addresses, without mask.
            subnet = subnet_elem.get('cidr').split('/')[0]
            subnet_and_mask = subnet_elem.get('cidr')
            if subnet not in excl_list:
                args = ['route', 'add', '-net', subnet_and_mask, 'gw',
                        next_hop]
                ret = self.program_rtr(args, rout_id, namespace=namespace)
                if not ret:
                    LOG.error("Program router returned error for %s",
                              rout_id)
                    return False
    return True
def program_rtr_nwk_next_hop(self, rout_id, next_hop, cidr):
    """Program the next hop route for a single network CIDR.

    (Docstring corrected: despite the original wording, this method
    adds one route for ``cidr`` via ``next_hop`` in the router's
    namespace, not routes for all networks of a tenant.)

    :returns: True on success, False otherwise
    """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    args = ['route', 'add', '-net', cidr, 'gw', next_hop]
    ret = self.program_rtr(args, rout_id, namespace=namespace)
    if not ret:
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst,
                            excl_list):
    """Remove the next hop for all networks of a tenant.

    Reads the routing table of the router's namespace and deletes
    every non-default route whose destination appears in neither
    subnet_lst nor excl_list.

    :returns: False on any failure, True when done
    """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    args = ['ip', 'route']
    # NOTE(review): program_rtr_return presumably runs the command in
    # the namespace and returns its stdout -- definition not visible
    # here, confirm.
    ret = self.program_rtr_return(args, rout_id, namespace=namespace)
    if ret is None:
        LOG.error("Get routes return None %s", rout_id)
        return False
    routes = ret.split('\n')
    concat_lst = subnet_lst + excl_list
    for rout in routes:
        if len(rout) == 0:
            continue
        # First token of each 'ip route' line is the destination.
        nwk = rout.split()[0]
        if nwk == 'default':
            continue
        nwk_no_mask = nwk.split('/')[0]
        # Keep routes listed either with or without their mask.
        if nwk_no_mask not in concat_lst and nwk not in concat_lst:
            args = ['route', 'del', '-net', nwk, 'gw', next_hop]
            ret = self.program_rtr(args, rout_id, namespace=namespace)
            if not ret:
                LOG.error("Program router returned error for %s",
                          rout_id)
                return False
    return True
def get_fw(self, fw_id):
    """Return the Firewall given its ID, or None if the lookup fails."""
    try:
        return self.neutronclient.show_firewall(fw_id)
    except Exception as exc:
        LOG.error("Failed to get firewall list for id %(id)s, "
                  "Exc %(exc)s", {'id': fw_id, 'exc': str(exc)})
        return None
def get_fw_rule(self, rule_id):
    """Return the firewall rule, given its ID, or None on failure."""
    try:
        return self.neutronclient.show_firewall_rule(rule_id)
    except Exception as exc:
        LOG.error("Failed to get firewall rule for id %(id)s "
                  "Exc %(exc)s", {'id': rule_id, 'exc': str(exc)})
        return None
def get_fw_policy(self, policy_id):
    """Return the firewall policy, given its ID.

    :returns: the policy dict from neutron, or None on failure
    """
    policy = None
    try:
        policy = self.neutronclient.show_firewall_policy(policy_id)
    except Exception as exc:
        # Fixed "plcy" typo in the log message.
        LOG.error("Failed to get firewall policy for id %(id)s "
                  "Exc %(exc)s",
                  {'id': policy_id, 'exc': str(exc)})
    return policy
def _ensure_create_ha_compliant(self, router, router_type):
    """To be called in create_router() BEFORE router is created in DB.

    Normalizes the HA attributes of the router request, filling in
    defaults from configuration wherever the request left a value
    unspecified.

    :param router: router request dict; ha.ENABLED and ha.DETAILS are
        popped from it
    :param router_type: router type record providing
        'ha_enabled_by_default'
    :returns: dict with ha.ENABLED and (when enabled) ha.DETAILS
    :raises ha.HADisabled: HA requested but support globally disabled
    :raises ha.HADisabledHAType: requested HA mechanism is disabled
    """
    details = router.pop(ha.DETAILS, {})
    if details == ATTR_NOT_SPECIFIED:
        details = {}
    res = {ha.ENABLED: router.pop(ha.ENABLED, ATTR_NOT_SPECIFIED),
           ha.DETAILS: details}
    if not is_attr_set(res[ha.ENABLED]):
        res[ha.ENABLED] = router_type['ha_enabled_by_default']
    if res[ha.ENABLED] and not cfg.CONF.ha.ha_support_enabled:
        raise ha.HADisabled()
    if not res[ha.ENABLED]:
        # HA stays off; no details need to be normalized.
        return res
    # Fill any unspecified HA detail with its configured default.
    if not is_attr_set(details.get(ha.TYPE, ATTR_NOT_SPECIFIED)):
        details[ha.TYPE] = cfg.CONF.ha.default_ha_mechanism
    if details[ha.TYPE] in cfg.CONF.ha.disabled_ha_mechanisms:
        raise ha.HADisabledHAType(ha_type=details[ha.TYPE])
    if not is_attr_set(details.get(ha.REDUNDANCY_LEVEL,
                                   ATTR_NOT_SPECIFIED)):
        details[ha.REDUNDANCY_LEVEL] = (
            cfg.CONF.ha.default_ha_redundancy_level)
    if not is_attr_set(details.get(ha.PROBE_CONNECTIVITY,
                                   ATTR_NOT_SPECIFIED)):
        details[ha.PROBE_CONNECTIVITY] = (
            cfg.CONF.ha.connectivity_probing_enabled_by_default)
    if not is_attr_set(details.get(ha.PROBE_TARGET, ATTR_NOT_SPECIFIED)):
        details[ha.PROBE_TARGET] = cfg.CONF.ha.default_probe_target
    if not is_attr_set(details.get(ha.PROBE_INTERVAL, ATTR_NOT_SPECIFIED)):
        details[ha.PROBE_INTERVAL] = cfg.CONF.ha.default_ping_interval
    return res
def _create_redundancy_routers(self, context, new_router, ha_settings,
                               new_router_db, ports=None, expire_db=False):
    """To be called in create_router() AFTER router has been
    created in DB.

    Persists the router's HA settings and creates its redundancy
    routers (and, when a gateway exists, the HA group for the
    gateway VIP port).
    """
    if (ha.ENABLED not in ha_settings or
            not ha_settings[ha.ENABLED]):
        # HA not requested; just report HA as disabled on the router.
        new_router[ha.HA] = {ha.ENABLED: False}
        return
    ha_spec = ha_settings[ha.DETAILS]
    priority = ha_spec.get(ha.PRIORITY, DEFAULT_MASTER_PRIORITY)
    with context.session.begin(subtransactions=True):
        r_ha_s_db = RouterHASetting(
            router_id=new_router['id'],
            ha_type=ha_spec[ha.TYPE],
            redundancy_level=ha_spec[ha.REDUNDANCY_LEVEL],
            priority=priority,
            probe_connectivity=ha_spec[ha.PROBE_CONNECTIVITY],
            probe_target=ha_spec[ha.PROBE_TARGET],
            probe_interval=ha_spec[ha.PROBE_INTERVAL])
        context.session.add(r_ha_s_db)
    if r_ha_s_db.probe_connectivity and r_ha_s_db.probe_target is None:
        LOG.warning("Connectivity probing for high-availability is "
                    "enabled but probe target is not specified. Please"
                    " configure option \'default_probe_target\'.")
    e_context = context.elevated()
    if new_router_db.gw_port:
        # generate ha settings and extra port for router gateway (VIP) port
        gw_port = self._core_plugin._make_port_dict(new_router_db.gw_port)
        self._create_ha_group(e_context, new_router, gw_port, r_ha_s_db)
    # Redundancy routers are numbered 1..redundancy_level.
    self._add_redundancy_routers(e_context, 1,
                                 ha_spec[ha.REDUNDANCY_LEVEL] + 1,
                                 new_router, ports or [], r_ha_s_db)
    if expire_db:
        context.session.expire(new_router_db)
    self._extend_router_dict_ha(new_router, new_router_db)
def _ensure_update_ha_compliant(self, router, current_router,
                                r_hd_binding_db):
    """To be called in update_router() BEFORE router has been
    updated in DB.

    Validates and normalizes the HA attributes of an update request
    against the router's current HA state.

    :returns: dict of the effective HA changes to apply
    :raises ha.HADisabled: HA involved but support globally disabled
    :raises ha.HATypeCannotBeChanged: attempt to change HA mechanism
    :raises ha.HADisabledHAType: requested mechanism is disabled
    """
    if r_hd_binding_db.role == ROUTER_ROLE_HA_REDUNDANCY:
        # Redundancy routers themselves never have HA enabled.
        return {ha.ENABLED: False}
    auto_enable_ha = r_hd_binding_db.router_type.ha_enabled_by_default
    requested_ha_details = router.pop(ha.DETAILS, {})
    # If ha_details are given then ha is assumed to be enabled even if
    # it is not explicitly specified, or if auto_enable_ha says so.
    # Note that None is used to indicate that the request did not
    # include any ha information.
    requested_ha_enabled = router.pop(
        ha.ENABLED, True if requested_ha_details or auto_enable_ha is True
        else None)
    res = {}
    ha_currently_enabled = current_router.get(ha.ENABLED, False)
    # Note: must check for 'is True' as None implies attribute not given
    if requested_ha_enabled is True or ha_currently_enabled is True:
        if not cfg.CONF.ha.ha_support_enabled:
            raise ha.HADisabled()
        curr_ha_details = current_router.get(ha.DETAILS, {})
        if ha.TYPE in requested_ha_details:
            requested_ha_type = requested_ha_details[ha.TYPE]
            if (ha.TYPE in curr_ha_details and
                    requested_ha_type != curr_ha_details[ha.TYPE]):
                raise ha.HATypeCannotBeChanged()
            elif requested_ha_type in cfg.CONF.ha.disabled_ha_mechanisms:
                raise ha.HADisabledHAType(ha_type=requested_ha_type)
        if requested_ha_enabled:
            res[ha.ENABLED] = requested_ha_enabled
        if requested_ha_details:
            res[ha.DETAILS] = requested_ha_details
    elif requested_ha_enabled is False:
        res[ha.ENABLED] = False
    return res
def _teardown_redundancy_router_gw_connectivity(self, context, router,
                                                router_db,
                                                plugging_driver):
    """To be called in update_router() if the router gateway is to change
    BEFORE router has been updated in DB.
    """
    if not router[ha.ENABLED]:
        # No HA currently enabled so we're done
        return
    e_context = context.elevated()
    # since gateway is about to change the ha group for the current gateway
    # is removed, a new one will be created later
    self._delete_ha_group(e_context, router_db.gw_port_id)
    # teardown connectivity for the gw ports on the redundancy routers
    # and remove those ports as new ones will be created later
    rr_ids = []
    for r_b_db in router_db.redundancy_bindings:
        if plugging_driver is not None:
            plugging_driver.teardown_logical_port_connectivity(
                e_context, r_b_db.redundancy_router.gw_port,
                r_b_db.redundancy_router.hosting_info.hosting_device_id)
        # Clear the gateway and disable HA on each redundancy router
        # without per-router notifications; one batch notification is
        # sent after the loop.
        self._update_router_no_notify(
            e_context, r_b_db.redundancy_router_id,
            {'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
        rr_ids.append(r_b_db.redundancy_router_id)
    self.notify_routers_updated(e_context, rr_ids)
def _update_redundancy_routers(self, context, updated_router,
                               update_specification, requested_ha_settings,
                               updated_router_db, gateway_changed):
    """To be called in update_router() AFTER router has been
    updated in DB.

    Reconciles the router's redundancy routers with the requested HA
    changes: enable HA, disable HA, change redundancy level/details,
    handle a changed gateway, and propagate selected plain attribute
    updates.
    """
    router_requested = update_specification['router']
    ha_settings_db = updated_router_db.ha_settings
    ha_enabled_requested = requested_ha_settings.get(ha.ENABLED, False)
    if not (updated_router[ha.ENABLED] or ha_enabled_requested):
        # No HA currently enabled and no HA requested so we're done
        return
    # The redundancy routers need interfaces on the same networks as the
    # user visible router.
    ports = self._get_router_interfaces(updated_router_db)
    e_context = context.elevated()
    if not updated_router[ha.ENABLED] and ha_enabled_requested:
        # No HA currently enabled but HA requested
        router_requested.update(requested_ha_settings)
        router_requested[EXTERNAL_GW_INFO] = (
            updated_router[EXTERNAL_GW_INFO])
        requested_ha_settings = self._ensure_create_ha_compliant(
            router_requested, updated_router_db.hosting_info.router_type)
        self._create_redundancy_routers(
            e_context, updated_router, requested_ha_settings,
            updated_router_db, ports, expire_db=True)
        return
    rr_ids = self._get_redundancy_router_ids(context, updated_router['id'])
    ha_details_update_spec = requested_ha_settings.get(ha.DETAILS)
    if (updated_router[ha.ENABLED] and not requested_ha_settings.get(
            ha.ENABLED, updated_router[ha.ENABLED])):
        # HA currently enabled but HA disable requested
        # delete ha settings and extra port for gateway (VIP) port
        self._delete_ha_group(e_context, updated_router_db.gw_port_id)
        self._remove_redundancy_routers(e_context, rr_ids, ports, True)
        with context.session.begin(subtransactions=True):
            context.session.delete(ha_settings_db)
    elif ha_details_update_spec:
        # HA currently enabled and HA setting update (other than
        # disable HA) requested
        old_redundancy_level = ha_settings_db.redundancy_level
        ha_settings_db.update(ha_details_update_spec)
        diff = (ha_details_update_spec.get(ha.REDUNDANCY_LEVEL,
                                           old_redundancy_level) -
                old_redundancy_level)
        with context.session.begin(subtransactions=True):
            context.session.add(ha_settings_db)
        if diff < 0:
            # Remove -diff redundancy routers
            #TODO(bobmel): Ensure currently active router is excluded
            to_remove = rr_ids[len(rr_ids) + diff:]
            rr_ids = rr_ids[:len(rr_ids) + diff]
            self._remove_redundancy_routers(e_context, to_remove, ports)
        elif diff > 0:
            # Add diff redundancy routers
            start = old_redundancy_level + 1
            stop = start + diff
            self._add_redundancy_routers(e_context, start, stop,
                                         updated_router, ports,
                                         ha_settings_db, False)
        if gateway_changed is True:
            self._change_ha_for_gateway(e_context, updated_router,
                                        updated_router_db, ha_settings_db,
                                        router_requested, expire=True)
        else:
            # Notify redundancy routers about changes
            self.notify_routers_updated(e_context, rr_ids)
    elif gateway_changed is True:
        # HA currently enabled (and to remain so), no HA setting update,
        # and gateway has changed
        self._change_ha_for_gateway(e_context, updated_router,
                                    updated_router_db, ha_settings_db,
                                    router_requested)
    # pick up updates to other attributes where it makes sense
    # and push - currently admin_state_up, name and routes.
    other_updates_spec = {'router': {}}
    if 'admin_state_up' in update_specification['router']:
        other_updates_spec['router']['admin_state_up'] = (
            update_specification['router']['admin_state_up'])
    if 'name' in update_specification['router']:
        other_updates_spec['router']['name'] = (
            update_specification['router']['name'])
    if (other_updates_spec['router'] or
            'routes' in update_specification['router']):
        self._process_other_router_updates(e_context, updated_router_db,
                                           other_updates_spec)
    # Ensure we get latest state from DB
    context.session.expire(updated_router_db)
    self._extend_router_dict_ha(updated_router, updated_router_db)
def _add_redundancy_routers(self, context, start_index, stop_index,
                            user_visible_router, ports=None,
                            ha_settings_db=None, create_ha_group=True):
    """Creates a redundancy router and its interfaces on
    the specified subnets.

    Creates redundancy routers numbered start_index..stop_index-1
    for the given user-visible router, binds them to it with
    increasing VRRP-style priority, then replicates the router's
    interfaces onto them.
    """
    priority = (DEFAULT_MASTER_PRIORITY +
                (start_index - 1) * PRIORITY_INCREASE_STEP)
    r = copy.deepcopy(user_visible_router)
    # No tenant_id so redundancy routers are hidden from user
    r['tenant_id'] = ''
    name = r['name']
    redundancy_r_ids = []
    for i in range(start_index, stop_index):
        del r['id']
        # We don't replicate the user visible router's routes, instead
        # they are populated to redundancy routers for get router(s) ops
        r.pop('routes', None)
        # Redundancy routers will never have a route spec themselves.
        # The redundancy routers must have HA disabled.
        r[ha.ENABLED] = False
        r['name'] = name + REDUNDANCY_ROUTER_SUFFIX + str(i)
        # set role so that purpose of this router can be easily determined
        r[routerrole.ROUTER_ROLE_ATTR] = ROUTER_ROLE_HA_REDUNDANCY
        gw_info = r[EXTERNAL_GW_INFO]
        if gw_info and gw_info['external_fixed_ips']:
            # Ensure ip addresses are not specified as they cannot be
            # same as visible router's ip addresses.
            for e_fixed_ip in gw_info['external_fixed_ips']:
                e_fixed_ip.pop('ip_address', None)
        r = self.create_router(context, {'router': r})
        LOG.debug("Created redundancy router %(index)d with router id "
                  "%(r_id)s", {'index': i, 'r_id': r['id']})
        priority += PRIORITY_INCREASE_STEP
        r_b_b = RouterRedundancyBinding(
            redundancy_router_id=r['id'],
            priority=priority,
            user_router_id=user_visible_router['id'])
        context.session.add(r_b_b)
        redundancy_r_ids.append(r['id'])
    # Replicate each of the user visible router's interfaces onto the
    # newly created redundancy routers.
    for port_db in ports or []:
        port = self._core_plugin._make_port_dict(port_db)
        self._add_redundancy_router_interfaces(
            context, user_visible_router, None, port,
            redundancy_r_ids, ha_settings_db, create_ha_group)
def _remove_redundancy_routers(self, context, router_ids, ports,
                               delete_ha_groups=False):
    """Deletes all interfaces of the specified redundancy routers
    and then the redundancy routers themselves.
    """
    # Only the first fixed ip of each port is considered -- presumably
    # the primary subnet of the interface; TODO confirm for multi-
    # subnet (IPv6) ports.
    subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
                    for port in ports]
    for r_id in router_ids:
        for i in range(len(subnets_info)):
            self.remove_router_interface(context, r_id, subnets_info[i])
            LOG.debug("Removed interface on %(s_id)s to redundancy router "
                      "with %(r_id)s",
                      {'s_id': ports[i]['network_id'], 'r_id': r_id})
            # There is only one ha group per network so only delete once
            if delete_ha_groups and r_id == router_ids[0]:
                self._delete_ha_group(context, ports[i]['id'])
        self.delete_router(context, r_id)
        LOG.debug("Deleted redundancy router %s", r_id)
def _delete_redundancy_routers(self, context, router_db):
    """To be called in delete_router() BEFORE router has been
    deleted in DB. The router should not have any interfaces left.
    """
    e_context = context.elevated()
    for binding in router_db.redundancy_bindings:
        self.delete_router(e_context, binding.redundancy_router_id)
        LOG.debug("Deleted redundancy router %s",
                  binding.redundancy_router_id)
    if router_db.gw_port_id:
        # delete ha settings and extra port for gateway (VIP) port
        self._delete_ha_group(e_context, router_db.gw_port_id)
def _add_redundancy_router_interfaces(self, context, router, itfc_info,
                                      new_port, redundancy_router_ids=None,
                                      ha_settings_db=None,
                                      create_ha_group=True):
    """To be called in add_router_interface() AFTER interface has been
    added to router in DB.
    """
    # There are essentially three cases where we add interface to a
    # redundancy router:
    # 1. HA is enabled on a user visible router that has one or more
    #    interfaces.
    # 2. Redundancy level is increased so one or more redundancy routers
    #    are added.
    # 3. An interface is added to a user visible router.
    #
    # For 1: An HA GROUP MUST BE CREATED and EXTRA PORTS MUST BE CREATED
    #        for each redundancy router. The id of extra port should be
    #        specified in the interface_info argument of the
    #        add_router_interface call so that we ADD BY PORT.
    # For 2: HA group need NOT be created as it will already exist (since
    #        there is already at least one redundancy router). EXTRA PORTS
    #        MUST BE CREATED for each added redundancy router. The id
    #        of extra port should be specified in the interface_info
    #        argument of the add_router_interface call so that we ADD BY
    #        PORT.
    # For 3: if the interface for the user_visible_router was added by ...
    #   a) PORT: An HA GROUP MUST BE CREATED and EXTRA PORTS MUST BE
    #            CREATED for each redundancy router. The id of extra port
    #            should be specified in the interface_info argument of
    #            the add_router_interface call so that we ADD BY PORT.
    #   b) SUBNET: There are two cases to consider. If the added interface
    #              of the user_visible_router has ...
    #      b1) 1 SUBNET: An HA GROUP MUST BE CREATED and EXTRA
    #                    PORTS MUST BE CREATED for each redundancy
    #                    router. The id of extra port should be
    #                    specified in the interface_info argument of
    #                    the add_router_interface call so we ADD BY
    #                    PORT.
    #      b2) >1 SUBNETS: HA group need NOT be created as it will
    #                      already exist (since the redundancy routers
    #                      should already have extra ports to which the
    #                      (IPv6) subnet is added). Extra ports need
    #                      thus NOT be created. The subnet id should be
    #                      added to the existing extra ports.
    router_id = router['id']
    if ha_settings_db is None:
        ha_settings_db = self._get_ha_settings_by_router_id(context,
                                                            router_id)
    if ha_settings_db is None:
        # HA is not enabled on this router so there is nothing to do.
        return
    e_context = context.elevated()
    # Case 3b2 above: interface added by subnet onto a port that
    # already carries another subnet.
    add_by_subnet = (itfc_info is not None and 'subnet_id' in itfc_info and
                     len(new_port['fixed_ips']) > 1)
    if (add_by_subnet is False or (itfc_info is None and
                                   create_ha_group is True)):
        # generate ha settings and extra port for router (VIP) port
        self._create_ha_group(e_context, router, new_port, ha_settings_db)
    fixed_ips = self._get_fixed_ips_subnets(new_port['fixed_ips'])
    for r_id in (redundancy_router_ids or
                 self._get_redundancy_router_ids(e_context, router_id)):
        if add_by_subnet is True:
            # need to add subnet to redundancy router port
            ports = self._core_plugin.get_ports(
                e_context,
                filters={'device_id': [r_id],
                         'network_id': [new_port['network_id']]},
                fields=['fixed_ips', 'id'])
            redundancy_port = ports[0]
            fixed_ips = redundancy_port['fixed_ips']
            fixed_ip = {'subnet_id': itfc_info['subnet_id']}
            fixed_ips.append(fixed_ip)
            self._core_plugin.update_port(
                e_context, redundancy_port['id'],
                {'port': {'fixed_ips': fixed_ips}})
        else:
            # Create a hidden extra port and attach it so we ADD BY PORT.
            redundancy_port = self._create_hidden_port(
                e_context, new_port['network_id'], '', fixed_ips)
            interface_info = {'port_id': redundancy_port['id']}
            self.add_router_interface(e_context, r_id, interface_info)
def _update_redundancy_router_interfaces(self, context, router,
                                         port, modified_port_data,
                                         redundancy_router_ids=None,
                                         ha_settings_db=None):
    """To be called when the router interfaces are updated,
    like in the case of change in port admin_state_up status.
    """
    router_id = router['id']
    if ha_settings_db is None:
        ha_settings_db = self._get_ha_settings_by_router_id(context,
                                                            router_id)
    if ha_settings_db is None:
        # HA not enabled on this router; nothing to propagate.
        return
    e_context = context.elevated()
    rr_ids = self._get_redundancy_router_ids(e_context, router_id)
    port_info_list = self._core_plugin.get_ports(
        e_context, filters={'device_id': rr_ids,
                            'network_id': [port['network_id']]},
        fields=['device_id', 'id'])
    # Apply the same port change to each redundancy router's port on
    # this network ...
    for port_info in port_info_list:
        self._core_plugin.update_port(e_context, port_info['id'],
                                      modified_port_data)
    # ... and to the hidden (extra) port itself.
    self._update_hidden_port(e_context, port['id'], modified_port_data)
def _remove_redundancy_router_interfaces(self, context, router_id,
                                         old_port):
    """To be called in delete_router_interface() BEFORE interface has been
    removed from router in DB.
    """
    ha_settings = self._get_ha_settings_by_router_id(context, router_id)
    if ha_settings is None or old_port is None:
        # No HA on this router, or nothing to remove.
        return
    e_context = context.elevated()
    rr_ids = self._get_redundancy_router_ids(e_context, router_id)
    port_info_list = self._core_plugin.get_ports(
        e_context, filters={'device_id': rr_ids,
                            'network_id': [old_port['network_id']]},
        fields=['device_id', 'fixed_ips', 'id'])
    # Match redundancy router ports by the subnet of the removed
    # interface; only the first fixed ip is compared on either side.
    subnet_id = old_port['fixed_ips'][0]['subnet_id']
    for port_info in port_info_list:
        if port_info['fixed_ips'][0]['subnet_id'] == subnet_id:
            interface_info = {'port_id': port_info['id']}
            self.remove_router_interface(e_context, port_info['device_id'],
                                         interface_info)
    self._delete_ha_group(e_context, old_port['id'])
def _redundancy_routers_for_floatingip(
        self, context, router_id, redundancy_router_ids=None,
        ha_settings_db=None):
    """To be called in update_floatingip() to get the
    redundant router ids.

    :returns: list of redundancy router ids, or None when the router
        has no HA settings (HA not enabled)
    """
    if ha_settings_db is None:
        ha_settings_db = self._get_ha_settings_by_router_id(context,
                                                            router_id)
    if ha_settings_db is None:
        return
    e_context = context.elevated()
    # The element-by-element append loop added nothing over a plain
    # list copy.
    return list(redundancy_router_ids or
                self._get_redundancy_router_ids(e_context, router_id))
def _populate_ha_information(self, context, router):
    """To be called when router information, including router interface
    list, (for the l3_cfg_agent) has been collected so it is extended
    with ha information.
    """
    r_r_b = self._get_redundancy_router_bindings(
        context, redundancy_router_id=router['id'])
    if not r_r_b:
        if router[ha.ENABLED]:
            # The router is a user visible router with HA enabled.
            user_router_id = router['id']
            fips = []
        else:
            # The router is a user visible router with HA disabled.
            # Nothing more to do here.
            return
    else:
        # The router is a redundancy router.
        # Need to fetch floatingip configurations from user visible router
        # so they can be added to the redundancy routers.
        user_router_id = r_r_b[0].user_router_id
        fips = self.get_floatingips(context,
                                    {'router_id': [user_router_id]})
    if router['id'] != user_router_id:
        # We add the HA settings from user visible router to
        # its redundancy routers.
        user_router_db = self._get_router(context, user_router_id)
        self._extend_router_dict_ha(router, user_router_db)
    # The interfaces of the user visible router must use the
    # IP configuration of the extra ports in the HA groups.
    hag_dbs = self._get_subnet_id_indexed_ha_groups(context,
                                                    user_router_id)
    e_context = context.elevated()
    if router.get('gw_port'):
        modified_interfaces = []
        interface_port = self._populate_port_ha_information(
            e_context, router['gw_port'], router['id'], hag_dbs,
            user_router_id, modified_interfaces)
        if not interface_port:
            # The router has a gw_port but cannot find the port info yet
            # so mark this router to have incomplete info and bail.
            # The cfg_agent puts this in the updated_routers to ask again.
            router['status'] = cisco_constants.ROUTER_INFO_INCOMPLETE
            return
        if modified_interfaces:
            router['gw_port'] = interface_port
    modified_interfaces = []
    for itfc in router.get(bc.constants.INTERFACE_KEY, []):
        interface_port = self._populate_port_ha_information(
            e_context, itfc, router['id'], hag_dbs, user_router_id,
            modified_interfaces)
        if not interface_port:
            # the router has interfaces but cannot find the port info yet
            # so mark this router to have incomplete info and bail
            # the cfg_agent will put this in the updated_list to ask again
            router['status'] = cisco_constants.ROUTER_INFO_INCOMPLETE
            return
    if modified_interfaces:
        router[bc.constants.INTERFACE_KEY] = modified_interfaces
    if fips:
        router[bc.constants.FLOATINGIP_KEY] = fips
def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
                        port_type=DEVICE_OWNER_ROUTER_INTF):
    """Creates port used specially for HA purposes.

    :param network_id: network on which to create the port
    :param device_id: device owner id ('' leaves it unbound)
    :param fixed_ips: fixed-ip spec list for the port
    :param port_type: value for device_owner; defaults to a router
        interface port
    :returns: the created port dict
    """
    port = {'port': {
        'tenant_id': '',  # intentionally not set, hides port from users
        'network_id': network_id,
        'mac_address': ATTR_NOT_SPECIFIED,
        'fixed_ips': fixed_ips,
        'device_id': device_id,
        'device_owner': port_type,
        'admin_state_up': True,
        'name': ''}}
    # The dns-integration extension requires dns_name to be present.
    if extensions.is_extension_supported(self._core_plugin,
                                         "dns-integration"):
        port['port'].update(dns_name='')
    core_plugin = bc.get_plugin()
    return core_plugin.create_port(context, port)
def get_router_for_floatingip(self, context, internal_port,
                              internal_subnet, external_network_id):
    """We need to over-load this function so that we only return the
    user visible router and never its redundancy routers (as they never
    have floatingips associated with them).

    :returns: the id of an eligible user visible router
    :raises l3_exceptions.ExternalGatewayForFloatingIPNotFound: when
        no eligible router connects the subnet to the external network
    """
    gw_port = orm.aliased(models_v2.Port, name="gw_port")
    # Routers with an interface on the internal subnet AND a gateway
    # on the requested external network.
    routerport_qry = context.session.query(
        RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
        models_v2.Port, models_v2.IPAllocation).filter(
        models_v2.Port.network_id == internal_port['network_id'],
        RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS),
        models_v2.IPAllocation.subnet_id == internal_subnet['id']
    ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
        gw_port.network_id == external_network_id,
        gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW
    ).distinct()
    # Ensure that redundancy routers (in a ha group) are not returned,
    # since only the user visible router should have floatingips.
    # This can be done by checking that the id of routers does not
    # appear in the 'redundancy_router_id' column in the
    # 'cisco_router_redundancy_bindings' table.
    routerport_qry = routerport_qry.outerjoin(
        RouterRedundancyBinding,
        RouterRedundancyBinding.redundancy_router_id ==
        RouterPort.router_id)
    routerport_qry = routerport_qry.filter(
        RouterRedundancyBinding.redundancy_router_id == expr.null())
    # Prefer the router whose interface holds the subnet's gateway ip;
    # otherwise fall back to the first eligible router found.
    first_router_id = None
    for router_id, interface_ip in routerport_qry:
        if interface_ip == internal_subnet['gateway_ip']:
            return router_id
        if not first_router_id:
            first_router_id = router_id
    if first_router_id:
        return first_router_id
    raise l3_exceptions.ExternalGatewayForFloatingIPNotFound(
        subnet_id=internal_subnet['id'],
        external_network_id=external_network_id,
        port_id=internal_port['id'])
def allocate_fw_dev(self, fw_id):
"""Allocate firewall device.
Allocate the first Firewall device which has resources available.
"""
for cnt in self.res:
used = self.res.get(cnt).get('used')
if used < self.res.get(cnt).get('quota'):
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | Allocate firewall device.
Allocate the first Firewall device which has resources available. | entailment |
def populate_fw_dev(self, fw_id, mgmt_ip, new):
"""Populate the class after a restart. """
for cnt in self.res:
used = self.res.get(cnt).get('used')
if mgmt_ip == self.res[cnt].get('mgmt_ip'):
if new:
self.res[cnt]['used'] = used + 1
self.res[cnt]['fw_id_lst'].append(fw_id)
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | Populate the class after a restart. | entailment |
def get_fw_dev_map(self, fw_id):
"""Return the object dict and mgmt ip for a firewall. """
for cnt in self.res:
if fw_id in self.res.get(cnt).get('fw_id_lst'):
return self.res[cnt].get('obj_dict'), (
self.res[cnt].get('mgmt_ip'))
return None, None | Return the object dict and mgmt ip for a firewall. | entailment |
def deallocate_fw_dev(self, fw_id):
"""Release the firewall resource. """
for cnt in self.res:
if fw_id in self.res.get(cnt).get('fw_id_lst'):
self.res[cnt]['used'] = self.res[cnt]['used'] - 1
self.res.get(cnt).get('fw_id_lst').remove(fw_id)
return | Release the firewall resource. | entailment |
def populate_local_sch_cache(self, fw_dict):
"""Populate the local cache from FW DB after restart. """
for fw_id in fw_dict:
fw_data = fw_dict.get(fw_id)
mgmt_ip = fw_data.get('fw_mgmt_ip')
dev_status = fw_data.get('device_status')
if dev_status == 'SUCCESS':
new = True
else:
new = False
if mgmt_ip is not None:
drvr_dict, mgmt_ip = self.sched_obj.populate_fw_dev(fw_id,
mgmt_ip,
new)
if drvr_dict is None or mgmt_ip is None:
LOG.info("Pop cache for FW sch: drvr_dict or mgmt_ip "
"is None") | Populate the local cache from FW DB after restart. | entailment |
def drvr_initialize(self, cfg):
"""Initialize the driver routines. """
cnt = 0
for ip in self.obj_dict:
cfg_dict = {}
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
cfg_dict['mgmt_ip_addr'] = ip
if self.user_list is not None:
cfg_dict['user'] = self.user_list[cnt]
if self.pwd_list is not None:
cfg_dict['pwd'] = self.pwd_list[cnt]
if self.interface_in_list is not None:
cfg_dict['interface_in'] = self.interface_in_list[cnt]
if self.interface_out_list is not None:
cfg_dict['interface_out'] = self.interface_out_list[cnt]
drvr_obj.initialize(cfg_dict)
cnt = cnt + 1 | Initialize the driver routines. | entailment |
def populate_event_que(self, que_obj):
"""Populates the event queue object.
This is for sending router events to event handler.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_event_que(que_obj) | Populates the event queue object.
This is for sending router events to event handler. | entailment |
def populate_dcnm_obj(self, dcnm_obj):
"""Populates the DCNM object. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
drvr_obj.populate_dcnm_obj(dcnm_obj) | Populates the DCNM object. | entailment |
def is_device_virtual(self):
"""Returns if the device is physical or virtual. """
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.is_device_virtual()
# No way to pin a device as of now, so return the first
# TODO(padkrish)
return ret | Returns if the device is physical or virtual. | entailment |
def create_fw_device(self, tenant_id, fw_id, data):
"""Creates the Firewall. """
drvr_dict, mgmt_ip = self.sched_obj.allocate_fw_dev(fw_id)
if drvr_dict is not None and mgmt_ip is not None:
self.update_fw_db_mgmt_ip(fw_id, mgmt_ip)
ret = drvr_dict.get('drvr_obj').create_fw(tenant_id, data)
if not ret:
self.sched_obj.deallocate_fw_dev(fw_id)
return ret
else:
return False | Creates the Firewall. | entailment |
def delete_fw_device(self, tenant_id, fw_id, data):
"""Deletes the Firewall. """
drvr_dict, mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
ret = drvr_dict.get('drvr_obj').delete_fw(tenant_id, data)
# FW DB gets deleted, so no need to remove the MGMT IP
if ret:
self.sched_obj.deallocate_fw_dev(fw_id)
return ret | Deletes the Firewall. | entailment |
def modify_fw_device(self, tenant_id, fw_id, data):
"""Modifies the firewall cfg. """
drvr_dict, mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
return drvr_dict.get('drvr_obj').modify_fw(tenant_id, data) | Modifies the firewall cfg. | entailment |
def network_create_notif(self, tenant_id, tenant_name, cidr):
"""Notification for Network create.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.network_create_notif(tenant_id, tenant_name, cidr)
LOG.info("Driver with IP %(ip)s return %(ret)s",
{'ip': ip, 'ret': ret}) | Notification for Network create.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them. | entailment |
def network_delete_notif(self, tenant_id, tenant_name, net_id):
"""Notification for Network delete.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them.
"""
for ip in self.obj_dict:
drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
ret = drvr_obj.network_delete_notif(tenant_id, tenant_name,
net_id)
LOG.info("Driver with IP %(ip)s return %(ret)s for network "
"delete notification", {'ip': ip, 'ret': ret}) | Notification for Network delete.
Since FW ID not present, it's not possible to know which FW instance
to call. So, calling everyone, each instance will figure out if it
applies to them. | entailment |
def get_cfg_router_ids(self, context, host, router_ids=None,
hosting_device_ids=None):
"""Returns IDs of routers scheduled to l3 agent on <host>"""
return self._l3plugin.cfg_list_router_ids_on_host(context, host,
router_ids,
hosting_device_ids) | Returns IDs of routers scheduled to l3 agent on <host> | entailment |
def cfg_sync_routers(self, context, host, router_ids=None,
hosting_device_ids=None):
"""Sync routers according to filters to a specific Cisco cfg agent.
:param context: contains user information
:param host: originator of callback
:param router_ids: list of router ids to return information about
:param hosting_device_ids: list of hosting device ids to get
routers for.
:returns: a list of routers with their hosting devices, interfaces and
floating_ips
"""
adm_context = bc.context.get_admin_context()
try:
routers = (
self._l3plugin.list_active_sync_routers_on_hosting_devices(
adm_context, host, router_ids, hosting_device_ids))
except AttributeError:
routers = []
LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
{'agt': host, 'routers': jsonutils.dumps(routers, indent=5)})
return routers | Sync routers according to filters to a specific Cisco cfg agent.
:param context: contains user information
:param host: originator of callback
:param router_ids: list of router ids to return information about
:param hosting_device_ids: list of hosting device ids to get
routers for.
:returns: a list of routers with their hosting devices, interfaces and
floating_ips | entailment |
def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses):
"""Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param router_id: dict with floatingip_id as key and status as value
"""
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self._l3plugin.update_floatingip_status(
context, floatingip_id, status)
except l3_exceptions.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
            # Find all floating IPs known to have been associated with the given router
# for which an update was not received. Set them DOWN mercilessly
# This situation might occur for some asynchronous backends if
# notifications were missed
known_router_fips = self._l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
# Consider only floating ips which were disassociated in the API
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
LOG.debug("update_fip_statuses: disable: %s", fip_id)
self._l3plugin.update_floatingip_status(
context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN) | Update operational status for one or several floating IPs.
This is called by Cisco cfg agent to update the status of one or
several floatingips.
:param context: contains user information
:param router_id: id of router associated with the floatingips
:param router_id: dict with floatingip_id as key and status as value | entailment |
def update_port_statuses_cfg(self, context, port_ids, status):
"""Update the operational statuses of a list of router ports.
This is called by the Cisco cfg agent to update the status of a list
of ports.
:param context: contains user information
:param port_ids: list of ids of all the ports for the given status
:param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
"""
self._l3plugin.update_router_port_statuses(context, port_ids,
status) | Update the operational statuses of a list of router ports.
This is called by the Cisco cfg agent to update the status of a list
of ports.
:param context: contains user information
:param port_ids: list of ids of all the ports for the given status
:param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN. | entailment |
def get_mysql_credentials(cfg_file):
"""Get the credentials and database name from options in config file."""
try:
parser = ConfigParser.ConfigParser()
cfg_fp = open(cfg_file)
parser.readfp(cfg_fp)
cfg_fp.close()
except ConfigParser.NoOptionError:
cfg_fp.close()
print('Failed to find mysql connections credentials.')
sys.exit(1)
except IOError:
print('ERROR: Cannot open %s.', cfg_file)
sys.exit(1)
value = parser.get('dfa_mysql', 'connection')
try:
# Find location of pattern in connection parameter as shown below:
# http://username:password@host/databasename?characterset=encoding'
sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
# The list parameter contains:
# indices[0], is the index of '://'
# indices[1], is the index of '@'
# indices[2], is the index of '/'
# indices[3], is the index of '?'
indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
# Get the credentials
cred = value[indices[0] + 3:indices[1]].split(':')
# Get the host name
host = value[indices[1] + 1:indices[2]]
# Get the database name
db_name = value[indices[2] + 1:indices[3]]
# Get the character encoding
charset = value[indices[3] + 1:].split('=')[1]
return cred[0], cred[1], host, db_name, charset
except (ValueError, IndexError, AttributeError):
print('Failed to find mysql connections credentials.')
sys.exit(1) | Get the credentials and database name from options in config file. | entailment |
def modify_conf(cfgfile, service_name, outfn):
"""Modify config file neutron and keystone to include enabler options."""
if not cfgfile or not outfn:
print('ERROR: There is no config file.')
sys.exit(0)
options = service_options[service_name]
with open(cfgfile, 'r') as cf:
lines = cf.readlines()
for opt in options:
op = opt.get('option')
res = [line for line in lines if line.startswith(op)]
if len(res) > 1:
print('ERROR: There are more than one %s option.' % res)
sys.exit(0)
if res:
(op, sep, val) = (res[0].strip('\n').replace(' ', '').
partition('='))
new_val = None
if opt.get('is_list'):
# Value for this option can contain list of values.
# Append the value if it does not exist.
if not any(opt.get('value') == value for value in
val.split(',')):
new_val = ','.join((val, opt.get('value')))
else:
if val != opt.get('value'):
new_val = opt.get('value')
if new_val:
opt_idx = lines.index(res[0])
# The setting is different, replace it with new one.
lines.pop(opt_idx)
lines.insert(opt_idx, '='.join((opt.get('option'),
new_val + '\n')))
else:
# Option does not exist. Add the option.
try:
sec_idx = lines.index('[' + opt.get('section') + ']\n')
lines.insert(sec_idx + 1, '='.join(
(opt.get('option'), opt.get('value') + '\n')))
except ValueError:
print('Invalid %s section name.' % opt.get('section'))
sys.exit(0)
with open(outfn, 'w') as fwp:
all_lines = ''
for line in lines:
all_lines += line
fwp.write(all_lines) | Modify config file neutron and keystone to include enabler options. | entailment |
def get_all_hosting_devices(self, context):
"""Get a list of all hosting devices."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_all_hosting_devices',
host=self.host) | Get a list of all hosting devices. | entailment |
def get_all_hosted_routers(self, context):
"""Make a remote process call to retrieve the sync data for
routers that have been scheduled to a hosting device.
:param context: session context
"""
cctxt = self.client.prepare()
return cctxt.call(context, 'cfg_sync_all_hosted_routers',
host=self.host) | Make a remote process call to retrieve the sync data for
routers that have been scheduled to a hosting device.
:param context: session context | entailment |
def get_hardware_router_type_id(self, context):
"""Get the ID for the ASR1k hardware router type."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_hardware_router_type_id',
host=self.host) | Get the ID for the ASR1k hardware router type. | entailment |
def heartbeat(self, context, msg):
"""Process heartbeat message from agents on compute nodes."""
args = jsonutils.loads(msg)
when = args.get('when')
agent = args.get('agent')
# The configurations in here, only used once when creating entry
# for an agent in DB for the first time.
configurations = {'uplink': ''}
LOG.debug('heartbeat received: %(time)s - %(agent)s', (
{'time': when, 'agent': agent}))
if self.obj.neutron_event:
self.obj.neutron_event.create_rpc_client(agent)
        # Another option is to add the event to the queue for processing it later.
self.obj.update_agent_status(agent, when)
# Update the agents database.
agent_info = dict(timestamp=utils.utc_time(when), host=agent,
config=jsonutils.dumps(configurations))
self.obj.update_agent_db(agent_info) | Process heartbeat message from agents on compute nodes. | entailment |
def request_uplink_info(self, context, agent):
"""Process uplink message from an agent."""
LOG.debug('request_uplink_info from %(agent)s', {'agent': agent})
# Add the request into queue for processing.
event_type = 'agent.request.uplink'
payload = {'agent': agent}
timestamp = time.ctime()
data = (event_type, payload)
pri = self.obj.PRI_LOW_START + 1
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request uplink info into queue.')
return 0 | Process uplink message from an agent. | entailment |
def set_static_ip_address(self, context, msg):
"""Process request for setting rules in iptables.
        In cases where a static ip address is assigned to a VM, it is needed
to update the iptables rule for that address.
"""
args = jsonutils.loads(msg)
macaddr = args.get('mac')
ipaddr = args.get('ip')
LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', (
{'mac': macaddr, 'ip': ipaddr}))
# Add the request into queue for processing.
event_type = 'cli.static_ip.set'
payload = {'mac': macaddr, 'ip': ipaddr}
timestamp = time.ctime()
data = (event_type, payload)
pri = self.obj.PRI_LOW_START
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request to add static ip into queue.')
return 0 | Process request for setting rules in iptables.
        In cases where a static ip address is assigned to a VM, it is needed
to update the iptables rule for that address. | entailment |
def update_vm_result(self, context, msg):
"""Update VM's result field in the DB.
        The result reflects the success or failure of the operation when an
agent processes the vm info.
"""
args = jsonutils.loads(msg)
agent = context.get('agent')
port_id = args.get('port_uuid')
result = args.get('result')
LOG.debug('update_vm_result received from %(agent)s: '
'%(port_id)s %(result)s', {'agent': agent,
'port_id': port_id,
'result': result})
# Add the request into queue for processing.
event_type = 'agent.vm_result.update'
payload = {'port_id': port_id, 'result': result}
timestamp = time.ctime()
data = (event_type, payload)
# TODO(nlahouti) use value defined in constants
pri = self.obj.PRI_LOW_START + 10
self.obj.pqueue.put((pri, timestamp, data))
LOG.debug('Added request vm result update into queue.')
return 0 | Update VM's result field in the DB.
        The result reflects the success or failure of the operation when an
agent processes the vm info. | entailment |
def _setup_rpc(self):
"""Setup RPC server for dfa server."""
endpoints = RpcCallBacks(self)
self.server = rpc.DfaRpcServer(self.ser_q, self._host,
self.cfg.dfa_rpc.transport_url,
endpoints,
exchange=constants.DFA_EXCHANGE) | Setup RPC server for dfa server. | entailment |
def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max):
"""Register segmentation id pool with DCNM. """
orch_id = cfg.dcnm.orchestrator_id
try:
segid_range = self.dcnm_client.get_segmentid_range(orch_id)
if segid_range is None:
self.dcnm_client.set_segmentid_range(orch_id, seg_id_min,
seg_id_max)
else:
conf_min, _, conf_max = segid_range[
"segmentIdRanges"].partition("-")
if int(conf_min) != seg_id_min or int(conf_max) != seg_id_max:
self.dcnm_client.update_segmentid_range(orch_id,
seg_id_min,
seg_id_max)
except dexc.DfaClientRequestFailed as exc:
LOG.error("Segment ID range could not be created/updated"
" on DCNM: %s", exc)
raise SystemExit(exc) | Register segmentation id pool with DCNM. | entailment |
def project_create_func(self, proj_id, proj=None):
"""Create project given project uuid"""
if self.get_project_name(proj_id):
LOG.info("project %s exists, returning", proj_id)
return
if not proj:
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
# In the project name, dci_id may be included. Check if this is the
        # case and extract the dci_id from the name, and provide dci_id when
# creating the project.
proj_name, dci_id = self._get_dci_id_and_proj_name(proj.name)
if proj_name in reserved_project_name:
proj_name = "_".join((proj_name, self.cfg.dcnm.orchestrator_id))
# The default partition name is 'os' (i.e. openstack) which reflects
# it is created by openstack.
part_name = self.cfg.dcnm.default_partition_name
if len(':'.join((proj_name, part_name))) > 32:
LOG.error('Invalid project name length: %s. The length of '
'org:part name is greater than 32',
len(':'.join((proj_name, part_name))))
return
try:
self.dcnm_client.create_project(self.cfg.dcnm.orchestrator_id,
proj_name, part_name, dci_id,
proj.description)
except dexc.DfaClientRequestFailed:
# Failed to send create project in DCNM.
# Save the info and mark it as failure and retry it later.
self.update_project_info_cache(proj_id, name=proj_name,
dci_id=dci_id,
result=constants.CREATE_FAIL)
LOG.error("Failed to create project %s on DCNM.", proj_name)
else:
self.update_project_info_cache(proj_id, name=proj_name,
dci_id=dci_id)
LOG.debug('project %(name)s %(dci)s %(desc)s', (
{'name': proj_name, 'dci': dci_id, 'desc': proj.description}))
self.project_create_notif(proj_id, proj_name) | Create project given project uuid | entailment |
def project_create_event(self, proj_info):
"""Create project."""
LOG.debug("Processing create %(proj)s event.", {'proj': proj_info})
proj_id = proj_info.get('resource_info')
self.project_create_func(proj_id) | Create project. | entailment |
def project_update_event(self, proj_info):
"""Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM.
"""
LOG.debug("Processing project_update_event %(proj)s.",
{'proj': proj_info})
proj_id = proj_info.get('resource_info')
try:
proj = self.keystone_event._service.projects.get(proj_id)
except Exception:
LOG.error("Failed to find project %s.", proj_id)
return
new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(proj.name)
# Check if project name and dci_id are the same, there is no change.
orig_proj_name = self.get_project_name(proj_id)
orig_dci_id = self.get_dci_id(proj_id)
if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id:
# This is an invalid update event.
LOG.warning('Project update event for %(proj)s is received '
'without changing in the project name: '
'%(orig_proj)s. Ignoring the event.',
{'proj': proj_id, 'orig_proj': orig_proj_name})
return
if orig_proj_name != new_proj_name:
# Project has new name and in DCNM the name of project cannot be
# modified. It is an invalid update. Do not process the event.
LOG.debug('Update request cannot be processed as name of project'
' is changed: %(proj)s %(orig_name)s %(orig_dci)s to '
'%(new_name)s %(new_dci)s.', (
{'proj': proj_id, 'orig_name': orig_proj_name,
'orig_dci': orig_dci_id, 'new_name': new_proj_name,
'new_dci': new_dci_id}))
return
# Valid update request.
LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s to '
'%(new_dci)s.', {'proj': proj_id,
'orig_dci': orig_dci_id,
'new_dci': new_dci_id})
try:
self.dcnm_client.update_project(new_proj_name,
self.cfg.dcnm.
default_partition_name,
dci_id=new_dci_id)
except dexc.DfaClientRequestFailed:
# Failed to update project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to update project %s on DCNM.",
new_proj_name)
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update',
result=constants.UPDATE_FAIL)
else:
self.update_project_info_cache(proj_id, name=new_proj_name,
dci_id=new_dci_id,
opcode='update')
LOG.debug('Updated project %(proj)s %(name)s.',
{'proj': proj_id, 'name': proj.name}) | Process project update event.
There could be change in project name. DCNM doesn't allow change in
project (a.k.a tenant). This event may be received for the DCI update.
If the change is for DCI, update the DCI portion of the project name
and send the update event to the DCNM. | entailment |
def project_delete_event(self, proj_info):
"""Process project delete event."""
LOG.debug("Processing project_delete_event...")
proj_id = proj_info.get('resource_info')
proj_name = self.get_project_name(proj_id)
if proj_name:
try:
self.dcnm_client.delete_project(proj_name,
self.cfg.dcnm.
default_partition_name)
except dexc.DfaClientRequestFailed:
# Failed to delete project in DCNM.
# Save the info and mark it as failure and retry it later.
LOG.error("Failed to create project %s on DCNM.",
proj_name)
self.update_project_info_cache(proj_id, name=proj_name,
opcode='delete',
result=constants.DELETE_FAIL)
else:
self.update_project_info_cache(proj_id, opcode='delete')
LOG.debug('Deleted project:%s', proj_name)
self.project_delete_notif(proj_id, proj_name) | Process project delete event. | entailment |
def subnet_create_event(self, subnet_info):
"""Process subnet create event."""
subnet = subnet_info.get('subnet')
if subnet:
self.create_subnet(subnet)
else:
# Check whether request is for subnets.
subnets = subnet_info.get('subnets')
if subnets:
for subnet in subnets:
self.create_subnet(subnet) | Process subnet create event. | entailment |
def create_subnet(self, snet):
"""Create subnet."""
snet_id = snet.get('id')
# This checks if the source of the subnet creation is FW,
# If yes, this event is ignored.
if self.fw_api.is_subnet_source_fw(snet.get('tenant_id'),
snet.get('cidr')):
LOG.info("Service subnet %s, returning", snet.get('cidr'))
return
if snet_id not in self.subnet:
self.subnet[snet_id] = {}
self.subnet[snet_id].update(snet)
net = self.network.get(self.subnet[snet_id].get('network_id'))
if not net:
LOG.error('Network %(network_id)s does not exist.',
{'network_id': self.subnet[snet_id].get('network_id')})
return
# Check if the network is created by DCNM.
query_net = self.get_network(net.get('id'))
if query_net.result != constants.SUBNET_PENDING:
LOG.info("Subnet exists, returning")
return
if query_net and query_net.source.lower() == 'dcnm':
# The network is created by DCNM.
# No need to process this event.
LOG.info('create_subnet: network %(name)s '
'was created by DCNM. Ignoring processing the '
'event.', {'name': query_net.name})
return
tenant_name = self.get_project_name(snet['tenant_id'])
subnet = utils.Dict2Obj(snet)
dcnm_net = utils.Dict2Obj(net)
if not tenant_name:
LOG.error('Project %(tenant_id)s does not exist.',
{'tenant_id': subnet.tenant_id})
self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
return
try:
self.dcnm_client.create_network(tenant_name, dcnm_net, subnet,
self.dcnm_dhcp)
self.update_network_db(net.get('id'), constants.RESULT_SUCCESS)
except dexc.DfaClientRequestFailed:
LOG.exception('Failed to create network %(net)s.',
{'net': dcnm_net.name})
# Update network database with failure result.
self.update_network_db(dcnm_net.id, constants.CREATE_FAIL)
self.network_sub_create_notif(snet.get('tenant_id'), tenant_name,
snet.get('cidr')) | Create subnet. | entailment |
def _get_segmentation_id(self, netid, segid, source):
"""Allocate segmentation id. """
return self.seg_drvr.allocate_segmentation_id(netid, seg_id=segid,
source=source) | Allocate segmentation id. | entailment |
def network_create_func(self, net):
"""Create network in database and dcnm
:param net: network dictionary
"""
net_id = net['id']
net_name = net.get('name')
network_db_elem = self.get_network(net_id)
# Check if the source of network creation is FW and if yes, skip
# this event.
# Check if there's a way to read the DB from service class
# TODO(padkrish)
if self.fw_api.is_network_source_fw(network_db_elem, net_name):
LOG.info("Service network %s, returning", net_name)
return
if not network_db_elem:
self.network[net_id] = {}
self.network[net_id].update(net)
net_name = net.get('name')
tenant_id = net.get('tenant_id')
# Extract segmentation_id from the network name
net_ext_name = self.cfg.dcnm.dcnm_net_ext
nobj = re.search(net_ext_name, net_name)
try:
seg_id = int((net_name[nobj.start(0) + len(net_ext_name) - 1:]
if nobj else None))
except (IndexError, TypeError, ValueError):
seg_id = None
# Check if network is already created.
query_net = self.get_network_by_segid(seg_id) if seg_id else None
if query_net:
# The network is already created no need to process the event.
if query_net.source.lower() == 'dcnm':
# DCNM created the network. Only update network id in database.
prev_id = query_net.network_id
params = dict(columns=dict(network_id=net_id))
self.update_network(prev_id, **params)
# Update the network cache.
prev_info = self.network.pop(prev_id)
prev_info['id'] = net_id
self.network[net_id] = prev_info
# Update the network name. After extracting the segmentation_id
# no need to keep it in the name. Removing it and update
# the network.
updated_net_name = (
net_name[:nobj.start(0) + len(net_ext_name) - 1])
try:
body = {'network': {'name': updated_net_name, }}
dcnm_net = self.neutronclient.update_network(
net_id, body=body).get('network')
LOG.debug('Updated network %(network)s', dcnm_net)
except Exception as exc:
LOG.exception('Failed to update network '
'%(network)s. Reason %(err)s.',
{'network': updated_net_name,
'err': str(exc)})
return
LOG.info('network_create_event: network %(name)s was created '
'by %(source)s. Ignoring processing the event.',
{'name': net_name, 'source': 'dcnm'})
return
if network_db_elem:
LOG.debug("Network %s exists, not processing" % net_name)
return
# Check if project (i.e. tenant) exist.
tenant_name = self.get_project_name(tenant_id)
if not tenant_name:
LOG.error('Failed to create network %(name)s. Project '
'%(tenant_id)s does not exist.',
{'name': net_name, 'tenant_id': tenant_id})
return
pseg_id = self.network[net_id].get('provider:segmentation_id')
seg_id = self._get_segmentation_id(net_id, pseg_id, 'openstack')
self.network[net_id]['segmentation_id'] = seg_id
try:
cfgp, fwd_mod = self.dcnm_client.get_config_profile_for_network(
net.get('name'))
self.network[net_id]['config_profile'] = cfgp
self.network[net_id]['fwd_mod'] = fwd_mod
self.add_network_db(net_id, self.network[net_id],
'openstack',
constants.SUBNET_PENDING)
LOG.debug('network_create_event: network=%s', self.network)
except dexc.DfaClientRequestFailed:
# Fail to get config profile from DCNM.
# Save the network info with failure result and send the request
# to DCNM later.
self.add_network_db(net_id, self.network[net_id], 'openstack',
constants.CREATE_FAIL)
LOG.error('Failed to create network=%s.', self.network) | Create network in database and dcnm
:param net: network dictionary | entailment |
def network_delete_event(self, network_info):
"""Process network delete event."""
net_id = network_info['network_id']
if net_id not in self.network:
LOG.error('network_delete_event: net_id %s does not exist.',
net_id)
return
segid = self.network[net_id].get('segmentation_id')
tenant_id = self.network[net_id].get('tenant_id')
tenant_name = self.get_project_name(tenant_id)
net = utils.Dict2Obj(self.network[net_id])
if not tenant_name:
LOG.error('Project %(tenant_id)s does not exist.',
{'tenant_id': tenant_id})
self.update_network_db(net.id, constants.DELETE_FAIL)
return
try:
self.dcnm_client.delete_network(tenant_name, net)
# Put back the segmentation id into the pool.
self.seg_drvr.release_segmentation_id(segid)
# Remove entry from database and cache.
self.delete_network_db(net_id)
del self.network[net_id]
snets = [k for k in self.subnet if (
self.subnet[k].get('network_id') == net_id)]
[self.subnet.pop(s) for s in snets]
except dexc.DfaClientRequestFailed:
LOG.error('Failed to create network %(net)s.',
{'net': net.name})
self.update_network_db(net_id, constants.DELETE_FAIL)
# deleting all related VMs
instances = self.get_vms()
instances_related = [k for k in instances if k.network_id == net_id]
for vm in instances_related:
LOG.debug("deleting vm %s because network is deleted", vm.name)
self.delete_vm_function(vm.port_id, vm)
self.network_del_notif(tenant_id, tenant_name, net_id) | Process network delete event. | entailment |
    def dcnm_network_create_event(self, network_info):
        """Process network create event from DCNM.

        Validates the event, mirrors the DCNM network into the local
        cache/database, then creates the corresponding network and
        subnet in neutron.

        :param network_info: dict with 'segmentation_id', 'project_name'
            and 'partition_name' describing the DCNM-side network.
        """
        # 1. Add network info to database before sending request to
        # neutron to create the network.
        # Check if network is already created.
        pre_seg_id = network_info.get('segmentation_id')
        pre_project_name = network_info.get('project_name')
        pre_partition_name = network_info.get('partition_name')
        if not pre_seg_id or not pre_partition_name or not pre_project_name:
            LOG.error('Invalid network event: %s', network_info)
            return
        # Check if partition name is the one that openstack created.
        if pre_partition_name != self.cfg.dcnm.default_partition_name:
            LOG.error('Failed to create network. Partition %(part)s is '
                      'not %(os_part)s which is created by openstack.',
                      {'part': pre_partition_name,
                       'os_part': self.cfg.dcnm.default_partition_name})
            return
        query_net = self.get_network_by_segid(pre_seg_id)
        if query_net:
            # The network is already created no need to process the event.
            LOG.info('dcnm_network_create_event: network %(name)s was '
                     'created. Ignoring processing the event.',
                     {'name': query_net.name})
            return
        dcnm_net_info = self.dcnm_client.get_network(pre_project_name,
                                                     pre_seg_id)
        if not dcnm_net_info:
            LOG.info('No network details for %(org)s and %(segid)s',
                     {'org': pre_project_name, 'segid': pre_seg_id})
            return
        net_id = utils.get_uuid()
        pseg_id = dcnm_net_info.get('segmentId')
        seg_id = self._get_segmentation_id(net_id, pseg_id, 'DCNM')
        cfgp = dcnm_net_info.get('profileName')
        net_name = dcnm_net_info.get('networkName')
        fwd_mod = self.dcnm_client.config_profile_fwding_mode_get(cfgp)
        tenant_name = dcnm_net_info.get('organizationName')
        tenant_id = self.get_project_id(tenant_name)
        # Get the subnet details.
        subnet = dcnm_net_info.get('dhcpScope')
        if not subnet:
            # The dhcpScope is not provided. Calculating the cidr based on
            # gateway ip and netmask.
            gw_addr = dcnm_net_info.get('gateway')
            net_mask = dcnm_net_info.get('netmaskLength')
            cidr = utils.make_cidr(gw_addr, net_mask)
            if not cidr:
                LOG.error('Failed to create network: '
                          'cidr is None for %(gw)s %(mask)s',
                          {'gw': gw_addr, 'mask': net_mask})
                return
            subnet = dict(gateway=gw_addr, subnet=cidr)
        # Check if parameters are provided.
        if not (net_name and tenant_id and seg_id and subnet):
            LOG.error('Invalid value: network %(name)s tenant_id '
                      '%(tenant_id)s segmentation_id %(seg_id)s '
                      'subnet %(subnet)s.', {'name': net_name,
                                             'tenant_id': tenant_id,
                                             'seg_id': seg_id,
                                             'subnet': subnet})
            return
        # Update network cache and add the network to the database.
        net_ext_name = self.cfg.dcnm.dcnm_net_ext
        self.network[net_id] = dict(segmentation_id=seg_id,
                                    config_profile=cfgp,
                                    fwd_mod=fwd_mod,
                                    tenant_id=tenant_id,
                                    name=net_name + net_ext_name,
                                    id=net_id,
                                    source='DCNM')
        self.add_network_db(net_id, self.network[net_id], 'DCNM',
                            constants.RESULT_SUCCESS)
        # 2. Send network create request to neutron
        try:
            # With create_network (called below), the same request comes as
            # notification and it will be processed in the
            # create_network_event. The request should not be processed as it
            # is already processed here.
            # The only way to decide whether it is for a new network or not is
            # the segmentation_id (DCNM does not have uuid for network) which
            # is unique. For that reason it is needed to send segmentation_id
            # when creating network in openstack.
            # Moreover, we are using network_type=local and for that reason
            # provider:segmentation_id cannot be added as parameter when
            # creating network. One solution is to embed segmentation_id in the
            # network name. Then, when processing the notification, if the
            # request is from DCNM, the segmentation_id will be extracted from
            # network name. With that create_network_event can decide to
            # process or deny an event.
            updated_net_name = net_name + net_ext_name + str(seg_id)
            body = {'network': {'name': updated_net_name,
                                'tenant_id': tenant_id,
                                'admin_state_up': True}}
            dcnm_net = self.neutronclient.create_network(
                body=body).get('network')
            # NOTE(review): net_id is rebound to the neutron-assigned id
            # here, while the cache/DB entries above keep the locally
            # generated uuid as key — confirm this is intentional.
            net_id = dcnm_net.get('id')
        except Exception as exc:
            # Failed to create network, do clean up.
            # Remove the entry from database and local cache.
            del self.network[net_id]
            self.delete_network_db(net_id)
            LOG.exception('dcnm_network_create_event: Failed to create '
                          '%(network)s. Reason %(err)s.',
                          {'network': body, 'err': str(exc)})
            return
        LOG.debug('dcnm_network_create_event: Created network %(network)s', (
            body))
        # 3. Send subnet create request to neutron.
        pool = subnet.get('ipRange')
        allocation_pools = []
        if pool:
            # ipRange has the form 'start1-end1,start2-end2,...'.
            allocation_pools = [{'start': s, 'end': e} for s, e in
                                [p.split('-') for p in pool.split(',')]]
        try:
            body = {'subnet': {'cidr': subnet.get('subnet'),
                               'gateway_ip': subnet.get('gateway'),
                               'ip_version': 4,
                               'network_id': net_id,
                               'tenant_id': tenant_id,
                               'enable_dhcp': not self.dcnm_dhcp,
                               'allocation_pools': allocation_pools, }}
            if not self.dcnm_dhcp:
                body.get('subnet').pop('allocation_pools')
            # Send requenst to create subnet in neutron.
            LOG.debug('Creating subnet %(subnet)s for DCNM request.', body)
            dcnm_subnet = self.neutronclient.create_subnet(
                body=body).get('subnet')
            subnet_id = dcnm_subnet.get('id')
            # Update subnet cache.
            self.subnet[subnet_id] = {}
            self.subnet[subnet_id].update(body.get('subnet'))
        except Exception as exc:
            # Failed to create network, do clean up if necessary.
            LOG.exception('Failed to create subnet %(subnet)s for DCNM '
                          'request. Error %(err)s',
                          {'subnet': body['subnet'], 'err': str(exc)})
        LOG.debug('dcnm_network_create_event: Created subnet %(subnet)s', (
            body))
def dcnm_network_delete_event(self, network_info):
"""Process network delete event from DCNM."""
seg_id = network_info.get('segmentation_id')
if not seg_id:
LOG.error('Failed to delete network. Invalid network '
'info %s.', network_info)
query_net = self.get_network_by_segid(seg_id)
if not query_net:
LOG.info('dcnm_network_delete_event: network %(segid)s '
'does not exist.', {'segid': seg_id})
return
if self.fw_api.is_network_source_fw(query_net, query_net.name):
LOG.info("Service network %s, returning", query_net.name)
return
# Send network delete request to neutron
try:
del_net = self.network.pop(query_net.network_id)
self.neutronclient.delete_network(query_net.network_id)
self.delete_network_db(query_net.network_id)
except Exception as exc:
# Failed to delete network.
# Put back the entry to the local cache???
self.network[query_net.network_id] = del_net
LOG.exception('dcnm_network_delete_event: Failed to delete '
'%(network)s. Reason %(err)s.',
{'network': query_net.name, 'err': str(exc)}) | Process network delete event from DCNM. | entailment |
    def update_port_ip_address(self):
        """Find the ip address that assinged to a port via DHCP.

        Scans the DHCP lease file for the MAC address of every VM whose
        IP is still the '0.0.0.0' placeholder, updates the port database
        with the discovered address and notifies the hosting agent.
        """
        leases = None
        req = dict(ip='0.0.0.0')
        instances = self.get_vms_for_this_req(**req)
        if instances is None:
            return
        for vm in instances:
            if not leases:
                # For the first time finding the leases file.
                leases = self._get_ip_leases()
                if not leases:
                    # File does not exist.
                    return
            for line in leases:
                # A 'lease <ip> {' line opens a record; remember its ip
                # so a following 'hardware ethernet' line can use it.
                if line.startswith('lease') and line.endswith('{\n'):
                    ip_addr = line.split()[1]
                if 'hardware ethernet' in line:
                    if vm.mac == line.replace(';', '').split()[2]:
                        LOG.info('Find IP address %(ip)s for %(mac)s',
                                 {'ip': ip_addr, 'mac': vm.mac})
                        try:
                            rule_info = dict(ip=ip_addr, mac=vm.mac,
                                             port=vm.port_id,
                                             status='up')
                            self.neutron_event.update_ip_rule(str(vm.host),
                                                              str(rule_info))
                        except (rpc.MessagingTimeout, rpc.RPCException,
                                rpc.RemoteError):
                            LOG.error("RPC error: Failed to update"
                                      "rules.")
                        else:
                            # Persist the discovered ip, then push the
                            # full VM info to the agent on that host.
                            params = dict(columns=dict(ip=ip_addr))
                            self.update_vm_db(vm.port_id, **params)
                            # Send update to the agent.
                            vm_info = dict(status=vm.status, vm_mac=vm.mac,
                                           segmentation_id=vm.segmentation_id,
                                           host=vm.host, port_uuid=vm.port_id,
                                           net_uuid=vm.network_id,
                                           oui=dict(ip_addr=ip_addr,
                                                    vm_name=vm.name,
                                                    vm_uuid=vm.instance_id,
                                                    gw_mac=vm.gw_mac,
                                                    fwd_mod=vm.fwd_mod,
                                                    oui_id='cisco'))
                            try:
                                self.neutron_event.send_vm_info(vm.host,
                                                                str(vm_info))
                            except (rpc.MessagingTimeout, rpc.RPCException,
                                    rpc.RemoteError):
                                LOG.error('Failed to send VM info to '
                                          'agent.')
The port database will be updated with the ip address. | entailment |
def send_vm_info(self, vm_info):
"""Send vm info to the compute host.
it will return True/False
"""
agent_host = vm_info.get('host')
if not agent_host:
LOG.info("vm/port is not bound to host, not sending vm info")
return True
try:
self.neutron_event.send_vm_info(agent_host,
str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException,
rpc.RemoteError):
# Failed to send info to the agent. Keep the data in the
# database as failure to send it later.
LOG.error('Failed to send VM info to agent %s', agent_host)
return False
else:
return True | Send vm info to the compute host.
it will return True/False | entailment |
def request_vms_info(self, payload):
"""Get the VMs from the database and send the info to the agent."""
# This request is received from an agent when it runs for the first
# time and uplink is detected.
agent = payload.get('agent')
LOG.debug('request_vms_info: Getting VMs info for %s', agent)
req = dict(host=payload.get('agent'))
instances = self.get_vms_for_this_req(**req)
vm_info = []
for vm in instances:
vm_info.append(dict(status=vm.status,
vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host,
port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=vm.ip,
vm_name=vm.name,
vm_uuid=vm.instance_id,
gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod,
oui_id='cisco')))
try:
self.neutron_event.send_vm_info(agent, str(vm_info))
except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
LOG.error('Failed to send VM info to agent.') | Get the VMs from the database and send the info to the agent. | entailment |
def request_uplink_info(self, payload):
"""Get the uplink from the database and send the info to the agent."""
# This request is received from an agent when it run for the first
# Send the uplink name (physical port name that connectes compute
# node and switch fabric),
agent = payload.get('agent')
config_res = self.get_agent_configurations(agent)
LOG.debug('configurations on %(agent)s is %(cfg)s', (
{'agent': agent, 'cfg': config_res}))
try:
self.neutron_event.send_msg_to_agent(agent,
constants.UPLINK_NAME,
config_res)
except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
LOG.error("RPC error: Failed to send uplink name to agent.") | Get the uplink from the database and send the info to the agent. | entailment |
    def set_static_ip_address(self, payload):
        """Set static ip address for a VM.

        :param payload: dict with 'mac' (instance MAC) and 'ip' (the
            static address requested via the CLI).
        """
        # This request is received from CLI for setting ip address of an
        # instance.
        macaddr = payload.get('mac')
        ipaddr = payload.get('ip')
        # Find the entry associated with the mac in the database.
        req = dict(mac=macaddr)
        instances = self.get_vms_for_this_req(**req)
        for vm in instances:
            LOG.info('Updating IP address: %(ip)s %(mac)s.',
                     {'ip': ipaddr, 'mac': macaddr})
            # Send request to update the rule.
            try:
                rule_info = dict(ip=ipaddr, mac=macaddr,
                                 port=vm.port_id,
                                 status='up')
                self.neutron_event.update_ip_rule(str(vm.host),
                                                  str(rule_info))
            except (rpc.MessagingTimeout, rpc.RPCException,
                    rpc.RemoteError):
                LOG.error("RPC error: Failed to update rules.")
            else:
                # Only persist and notify when the rule update succeeded.
                # Update the database.
                params = dict(columns=dict(ip=ipaddr))
                self.update_vm_db(vm.port_id, **params)
                # Send update to the agent.
                vm_info = dict(status=vm.status, vm_mac=vm.mac,
                               segmentation_id=vm.segmentation_id,
                               host=vm.host, port_uuid=vm.port_id,
                               net_uuid=vm.network_id,
                               oui=dict(ip_addr=ipaddr,
                                        vm_name=vm.name,
                                        vm_uuid=vm.instance_id,
                                        gw_mac=vm.gw_mac,
                                        fwd_mod=vm.fwd_mod,
                                        oui_id='cisco'))
                try:
                    self.neutron_event.send_vm_info(vm.host,
                                                    str(vm_info))
                except (rpc.MessagingTimeout, rpc.RPCException,
                        rpc.RemoteError):
                    LOG.error('Failed to send VM info to agent.')
def vm_result_update(self, payload):
"""Update the result field in VM database.
This request comes from an agent that needs to update the result
in VM database to success or failure to reflect the operation's result
in the agent.
"""
port_id = payload.get('port_id')
result = payload.get('result')
if port_id and result:
# Update the VM's result field.
params = dict(columns=dict(result=result))
self.update_vm_db(port_id, **params) | Update the result field in VM database.
This request comes from an agent that needs to update the result
in VM database to success or failure to reflect the operation's result
in the agent. | entailment |
def add_lbaas_port(self, port_id, lb_id):
"""Give port id, get port info and send vm info to agent.
:param port_id: port id of vip port
:param lb_id: vip id for v1 and lbaas_id for v2
"""
port_info = self.neutronclient.show_port(port_id)
port = port_info.get('port')
if not port:
LOG.error("Can not retrieve port info for port %s" % port_id)
return
LOG.debug("lbaas add port, %s", port)
if not port['binding:host_id']:
LOG.info("No host bind for lbaas port, octavia case")
return
port["device_id"] = lb_id
vm_info = self._make_vm_info(port, 'up', constants.LBAAS_PREFIX)
self.port[port_id] = vm_info
if self.send_vm_info(vm_info):
self.add_vms_db(vm_info, constants.RESULT_SUCCESS)
else:
self.add_vms_db(vm_info, constants.CREATE_FAIL) | Give port id, get port info and send vm info to agent.
:param port_id: port id of vip port
:param lb_id: vip id for v1 and lbaas_id for v2 | entailment |
def delete_lbaas_port(self, lb_id):
"""send vm down event and delete db.
:param lb_id: vip id for v1 and lbaas_id for v2
"""
lb_id = lb_id.replace('-', '')
req = dict(instance_id=lb_id)
instances = self.get_vms_for_this_req(**req)
for vm in instances:
LOG.info("deleting lbaas vm %s " % vm.name)
self.delete_vm_function(vm.port_id, vm) | send vm down event and delete db.
:param lb_id: vip id for v1 and lbaas_id for v2 | entailment |
def vip_create_event(self, vip_info):
"""Process vip create event."""
vip_data = vip_info.get('vip')
port_id = vip_data.get('port_id')
vip_id = vip_data.get('id')
self.add_lbaas_port(port_id, vip_id) | Process vip create event. | entailment |
def listener_create_event(self, listener_info):
"""Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted
"""
listener_data = listener_info.get('listener')
lb_list = listener_data.get('loadbalancers')
for lb in lb_list:
lb_id = lb.get('id')
req = dict(instance_id=(lb_id.replace('-', '')))
instances = self.get_vms_for_this_req(**req)
if not instances:
lb_info = self.neutronclient.show_loadbalancer(lb_id)
if lb_info:
port_id = lb_info["loadbalancer"]["vip_port_id"]
self.add_lbaas_port(port_id, lb_id)
else:
LOG.info("lbaas port for lb %s already added" % lb_id) | Process listener create event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted | entailment |
def listener_delete_event(self, listener_info):
"""Process listener delete event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted.
as the data only contains listener id, we will
scan all loadbalancers from db and delete the vdp
if their admin state is down in that loadbalancer
"""
lb_list = self.neutronclient.list_loadbalancers()
for lb in lb_list.get('loadbalancers'):
if not lb.get("listeners"):
lb_id = lb.get('id')
LOG.info("Deleting lb %s port" % lb_id)
self.delete_lbaas_port(lb_id) | Process listener delete event.
This is lbaas v2
vif will be plugged into ovs when first
listener is created and unpluged from ovs
when last listener is deleted.
as the data only contains listener id, we will
scan all loadbalancers from db and delete the vdp
if their admin state is down in that loadbalancer | entailment |
def pool_create_event(self, pool_info):
"""Process pool create event.
Extract pool info and get listener info and call next
listen_create_event
"""
pool_data = pool_info.get('pool')
listeners = pool_data.get('listeners')
for listener in listeners:
l_id = listener.get('id')
l_info = self.neutronclient.show_listener(l_id)
self.listener_create_event(l_info) | Process pool create event.
Extract pool info and get listener info and call next
listen_create_event | entailment |
def sync_projects(self):
"""Sync projects.
This function will retrieve project from keystone
and populate them dfa database and dcnm
"""
p = self.keystone_event._service.projects.list()
for proj in p:
if proj.name in not_create_project_name:
continue
LOG.info("Syncing project %s" % proj.name)
self.project_create_func(proj.id, proj=proj) | Sync projects.
This function will retrieve project from keystone
and populate them dfa database and dcnm | entailment |
def sync_networks(self):
"""sync networks.
It will retrieve networks from neutron and populate
them in dfa database and dcnm
"""
nets = self.neutronclient.list_networks()
for net in nets.get("networks"):
LOG.info("Syncing network %s", net["id"])
self.network_create_func(net)
subnets = self.neutronclient.list_subnets()
for subnet in subnets.get("subnets"):
LOG.info("Syncing subnet %s", subnet["id"])
self.create_subnet(subnet) | sync networks.
It will retrieve networks from neutron and populate
them in dfa database and dcnm | entailment |
    def create_threads(self):
        """Create and start all worker threads on the server.

        Spawns threads for neutron/keystone notifications, the event
        queue, the RPC server and (optionally) the DCNM listener, then
        runs the failure-recovery periodic task in the current thread.
        """
        # Create thread for neutron notifications.
        neutron_thrd = utils.EventProcessingThread('Neutron_Event',
                                                   self.neutron_event,
                                                   'event_handler',
                                                   self._excpq)
        self.dfa_threads.append(neutron_thrd)
        # Create thread for processing notification events.
        qp_thrd = utils.EventProcessingThread('Event_Queue', self,
                                              'process_queue', self._excpq)
        self.dfa_threads.append(qp_thrd)
        # Create thread for keystone notifications.
        keys_thrd = utils.EventProcessingThread('Keystone_Event',
                                                self.keystone_event,
                                                'event_handler', self._excpq)
        self.dfa_threads.append(keys_thrd)
        # Create thread to process RPC calls.
        hb_thrd = utils.EventProcessingThread('RPC_Server', self, 'start_rpc',
                                              self._excpq)
        self.dfa_threads.append(hb_thrd)
        # Create thread to listen to dcnm network events.
        if self.dcnm_event is not None:
            dcnmL_thrd = utils.EventProcessingThread('DcnmListener',
                                                     self.dcnm_event,
                                                     'process_amqp_msgs',
                                                     self._excpq)
            self.dfa_threads.append(dcnmL_thrd)
        # Create periodic task to process failure cases in create/delete
        # networks and projects.
        fr_thrd = utils.PeriodicTask(interval=constants.FAIL_REC_INTERVAL,
                                     func=self.add_events,
                                     event_queue=self.pqueue,
                                     priority=self.PRI_LOW_START + 10,
                                     excq=self._excpq)
        # Start all the threads.
        for t in self.dfa_threads:
            t.start()
        # Run the periodic tasks.
        # NOTE(review): run() executes in the calling thread (it is not
        # started as a separate thread) — confirm this blocking behavior
        # is intended.
        fr_thrd.run()
    def _allocate_specified_segment(self, session, seg_id, source):
        """Allocate specified segment.

        If segment exists, then try to allocate it and return db object.
        If segment does not exist, then try to create it and return db
        object. If allocation/creation failed (duplicates), then return
        None.

        :param session: DB session used for the transaction.
        :param seg_id: segmentation id to allocate.
        :param source: origin of the request (e.g. 'openstack', 'DCNM').
        """
        try:
            with session.begin(subtransactions=True):
                alloc = (session.query(self.model).filter_by(
                    segmentation_id=seg_id).first())
                if alloc:
                    if alloc.allocated:
                        # Segment already allocated
                        return
                    else:
                        # Segment not allocated
                        count = (session.query(self.model).
                                 filter_by(allocated=False,
                                           segmentation_id=seg_id).update(
                                     {"allocated": True}))
                        if count:
                            return alloc
                # Segment to create or already allocated
                # If the row existed but the update raced (count == 0),
                # this insert duplicates it and the unique constraint
                # raises DBDuplicateEntry below, yielding None.
                alloc = self.model(segmentation_id=seg_id,
                                   allocated=True, source=source)
                session.add(alloc)
        except db_exc.DBDuplicateEntry:
            # Segment already allocated (insert failure)
            alloc = None
        return alloc
If segment exists, then try to allocate it and return db object
If segment does not exists, then try to create it and return db object
If allocation/creation failed (duplicates), then return None | entailment |
    def _allocate_segment(self, session, net_id, source):
        """Allocate segment from pool.

        Return allocated db object or None.

        :param session: DB session used for the transaction.
        :param net_id: network id the segment is allocated for.
        :param source: origin of the request.
        """
        with session.begin(subtransactions=True):
            # First recycle segments whose delete timeout has expired by
            # clearing their delete_time; they become allocatable again.
            hour_lapse = utils.utc_time_lapse(self.seg_timeout)
            count = (session.query(self.model).filter(
                self.model.delete_time < hour_lapse).update(
                {"delete_time": None}))
            select = (session.query(self.model).filter_by(allocated=False,
                                                          delete_time=None))
            # Selected segment can be allocated before update by someone else,
            # We retry until update success or DB_MAX_RETRIES retries
            for attempt in range(DB_MAX_RETRIES + 1):
                alloc = select.first()
                if not alloc:
                    LOG.info("No segment resource available")
                    # No resource available
                    return
                # Conditional update: only wins if the row is still free.
                count = (session.query(self.model).
                         filter_by(segmentation_id=alloc.segmentation_id,
                                   allocated=False).update({"allocated": True,
                                                            "network_id":
                                                            net_id,
                                                            "source": source}))
                if count:
                    return alloc
        LOG.error("ERROR: Failed to allocate segment for net %(net)s"
                  " source %(src)s",
                  {'net': net_id, 'src': source})
Return allocated db object or None. | entailment |
    def allocate_subnet(self, subnet_lst, net_id=None):
        """Allocate subnet from pool.

        Return the allocated subnet address or None.

        :param subnet_lst: subnet addresses to exclude from allocation.
        :param net_id: network id to record on the allocated row.
        """
        session = db.get_session()
        # Build a conjunction excluding every address in subnet_lst.
        query_str = None
        for sub in subnet_lst:
            sub_que = (self.model.subnet_address != sub)
            if query_str is not None:
                query_str = query_str & sub_que
            else:
                query_str = sub_que
        with session.begin(subtransactions=True):
            select = (session.query(self.model).filter(
                (self.model.allocated == 0) & query_str))
            # Selected segment can be allocated before update by someone else,
            # We retry until update success or DB_MAX_RETRIES retries
            for attempt in range(DB_MAX_RETRIES + 1):
                alloc = select.first()
                if not alloc:
                    LOG.info("No subnet resource available")
                    return
                # Conditional update: only wins if the row is still free.
                count = (session.query(self.model).
                         filter_by(subnet_address=alloc.subnet_address,
                                   allocated=False).update({"allocated": True,
                                                            "network_id":
                                                            net_id}))
                if count:
                    return alloc.subnet_address
        LOG.error("ERROR: Failed to allocate subnet for net %(net)s",
                  {'net': net_id})
        return None
Return allocated db object or None. | entailment |
    def add_update_topology_db(self, **params):
        """Add or update an entry in the topology DB.

        :param params: expects a 'columns' dict keyed on 'host' and
            'protocol_interface' (the lookup key) plus the remaining
            topology attributes to store.
        """
        topo_dict = params.get('columns')
        session = db.get_session()
        host = topo_dict.get('host')
        protocol_interface = topo_dict.get('protocol_interface')
        with session.begin(subtransactions=True):
            try:
                # Check if entry exists.
                # one() raises NoResultFound when missing, which routes
                # control to the insert branch below.
                session.query(DfaTopologyDb).filter_by(
                    host=host, protocol_interface=protocol_interface).one()
                session.query(DfaTopologyDb).filter_by(
                    host=host, protocol_interface=protocol_interface).update(
                    topo_dict)
            except orm_exc.NoResultFound:
                LOG.info("Creating new topology entry for host "
                         "%(host)s on Interface %(intf)s",
                         {'host': host, 'intf': protocol_interface})
                topo_disc = DfaTopologyDb(
                    host=host, protocol_interface=protocol_interface,
                    phy_interface=topo_dict.get('phy_interface'),
                    created=topo_dict.get('created'),
                    heartbeat=topo_dict.get('heartbeat'),
                    remote_mgmt_addr=topo_dict.get('remote_mgmt_addr'),
                    remote_system_name=topo_dict.get('remote_system_name'),
                    remote_system_desc=topo_dict.get('remote_system_desc'),
                    remote_port_id_mac=topo_dict.get('remote_port_id_mac'),
                    remote_chassis_id_mac=topo_dict.get(
                        'remote_chassis_id_mac'),
                    remote_port=topo_dict.get('remote_port'),
                    remote_evb_cfgd=topo_dict.get('remote_evb_cfgd'),
                    remote_evb_mode=topo_dict.get('remote_evb_mode'),
                    configurations=topo_dict.get('configurations'))
                session.add(topo_disc)
            except orm_exc.MultipleResultsFound:
                LOG.error("More than one enty found for agent %(host)s."
                          "Interface %(intf)s",
                          {'host': host, 'intf': protocol_interface})
            except Exception as exc:
                LOG.error("Exception in add_update_topology_db %s", exc)
def _convert_topo_obj_dict(self, topology_objs):
"""Convert topology object to dict. """
topo_lst = []
for topo_obj in topology_objs:
topo_dct = {
'host': topo_obj.host,
'protocol_interface': topo_obj.protocol_interface,
'phy_interface': topo_obj.phy_interface,
'created': topo_obj.created, 'heartbeat': topo_obj.heartbeat,
'remote_mgmt_addr': topo_obj.remote_mgmt_addr,
'remote_system_name': topo_obj.remote_system_name,
'remote_system_desc': topo_obj.remote_system_desc,
'remote_port_id_mac': topo_obj.remote_port_id_mac,
'remote_chassis_id_mac': topo_obj.remote_chassis_id_mac,
'remote_port': topo_obj.remote_port,
'remote_evb_cfgd': topo_obj.remote_evb_cfgd,
'remote_evb_mode': topo_obj.remote_evb_mode,
'configurations': topo_obj.configurations}
topo_lst.append(topo_dct)
return topo_lst | Convert topology object to dict. | entailment |
def query_topology_db(self, dict_convert=False, **req):
"""Query an entry to the topology DB. """
session = db.get_session()
with session.begin(subtransactions=True):
try:
# Check if entry exists.
topo_disc = session.query(DfaTopologyDb).filter_by(**req).all()
except orm_exc.NoResultFound:
LOG.info("No Topology results found for %s", req)
return None
if dict_convert:
return self._convert_topo_obj_dict(topo_disc)
return topo_disc | Query an entry to the topology DB. | entailment |
def delete_topology_entry(self, **req):
"""Delete the entries from the topology DB. """
session = db.get_session()
with session.begin(subtransactions=True):
try:
rows = session.query(DfaTopologyDb).filter_by(**req).all()
except orm_exc.NoResultFound:
LOG.info("No Topology results found for %s", req)
return
try:
for row in rows:
session.delete(row)
except Exception as exc:
LOG.error("Exception raised %s", str(exc)) | Delete the entries from the topology DB. | entailment |
def enable_lldp(self, port_name, is_ncb=True, is_nb=False):
"""Function to enable LLDP on the interface. """
reply = None
if is_ncb:
reply = self.run_lldptool(["-L", "-i", port_name, "-g", "ncb",
"adminStatus=rxtx"])
elif is_nb:
reply = self.run_lldptool(["-L", "-i", port_name, "-g", "nb",
"adminStatus=rxtx"])
else:
LOG.error("Both NCB and NB are not selected to "
"enable LLDP")
return False
if reply is None:
return False
exp_str = "adminstatus=rxtx"
if exp_str in reply.replace(" ", "").lower():
return True
else:
return False | Function to enable LLDP on the interface. | entailment |
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False):
"""Function to Query LLDP TLV on the interface. """
reply = None
if is_ncb:
reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name,
"-g", "ncb"])
elif is_nb:
reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name,
"-g", "nb"])
else:
LOG.error("Both NCB and NB are not selected to "
"query LLDP")
return reply | Function to Query LLDP TLV on the interface. | entailment |
    def run_lldptool(self, args):
        """Invoke the lldptool utility with the given arguments.

        :param args: list of arguments appended after 'lldptool'.
        :return: the command output, or None (implicitly) when the
            execution fails.
        """
        full_args = ['lldptool'] + args
        try:
            return utils.execute(full_args, root_helper=self.root_helper)
        except Exception as exc:
            # Best-effort: log the failure and fall through to None.
            LOG.error("Unable to execute %(cmd)s. "
                      "Exception: %(exception)s",
                      {'cmd': full_args, 'exception': str(exc)})
def _check_common_tlv_format(self, tlv_complete_data, tlv_data_pattern,
tlv_string):
"""Check for the common TLV format. """
if tlv_complete_data is None:
return False, None
tlv_string_split = tlv_complete_data.split(tlv_string)
if len(tlv_string_split) < 2:
return False, None
next_tlv_list = tlv_string_split[1].split('TLV')[0]
tlv_val_set = next_tlv_list.split(tlv_data_pattern)
if len(tlv_val_set) < 2:
return False, None
return True, tlv_val_set | Check for the common TLV format. | entailment |
def get_remote_evb_mode(self, tlv_data):
"""Returns the EVB mode in the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "mode:", "EVB Configuration TLV")
if not ret:
return None
mode_val = parsed_val[1].split()[0].strip()
return mode_val | Returns the EVB mode in the TLV. | entailment |
def get_remote_mgmt_addr(self, tlv_data):
"""Returns Remote Mgmt Addr from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "IPv4:", "Management Address TLV")
if not ret:
return None
addr_fam = 'IPv4:'
addr = parsed_val[1].split('\n')[0].strip()
return addr_fam + addr | Returns Remote Mgmt Addr from the TLV. | entailment |
def get_remote_sys_desc(self, tlv_data):
"""Returns Remote Sys Desc from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "\n", "System Description TLV")
if not ret:
return None
return parsed_val[1].strip() | Returns Remote Sys Desc from the TLV. | entailment |
def get_remote_sys_name(self, tlv_data):
"""Returns Remote Sys Name from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "\n", "System Name TLV")
if not ret:
return None
return parsed_val[1].strip() | Returns Remote Sys Name from the TLV. | entailment |
def get_remote_port(self, tlv_data):
"""Returns Remote Port from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "\n", "Port Description TLV")
if not ret:
return None
return parsed_val[1].strip() | Returns Remote Port from the TLV. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.