sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def get_remote_chassis_id_mac(self, tlv_data):
"""Returns Remote Chassis ID MAC from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "MAC:", "Chassis ID TLV")
if not ret:
return None
mac = parsed_val[1].split('\n')
return mac[0].strip() | Returns Remote Chassis ID MAC from the TLV. | entailment |
def get_remote_port_id_local(self, tlv_data):
"""Returns Remote Port ID Local from the TLV. """
ret, parsed_val = self._check_common_tlv_format(
tlv_data, "Local:", "Port ID TLV")
if not ret:
return None
local = parsed_val[1].split('\n')
return local[0].strip() | Returns Remote Port ID Local from the TLV. | entailment |
def format_interface_name(intf_type, port, ch_grp=0):
"""Method to format interface name given type, port.
Given interface type, port, and channel-group, this
method formats an interface name. If channel-group is
non-zero, then port-channel is configured.
:param intf_type: Such as 'ethernet' or 'port-channel'
:param port: unique identification -- 1/32 or 1
:ch_grp: If non-zero, ignore other params and format
port-channel<ch_grp>
:returns: the full formatted interface name.
ex: ethernet:1/32, port-channel:1
"""
if ch_grp > 0:
return 'port-channel:%s' % str(ch_grp)
return '%s:%s' % (intf_type.lower(), port) | Method to format interface name given type, port.
Given interface type, port, and channel-group, this
method formats an interface name. If channel-group is
non-zero, then port-channel is configured.
:param intf_type: Such as 'ethernet' or 'port-channel'
:param port: unique identification -- 1/32 or 1
:ch_grp: If non-zero, ignore other params and format
port-channel<ch_grp>
:returns: the full formatted interface name.
ex: ethernet:1/32, port-channel:1 | entailment |
def split_interface_name(interface, ch_grp=0):
"""Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name
"""
interface = interface.lower()
if ch_grp != 0:
intf_type = 'port-channel'
port = str(ch_grp)
elif ':' in interface:
intf_type, port = interface.split(':')
elif interface.startswith('ethernet'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('ethernet')
elif interface.startswith('port-channel'):
interface = interface.replace(" ", "")
_, intf_type, port = interface.partition('port-channel')
else:
intf_type, port = 'ethernet', interface
return intf_type, port | Method to split interface type, id from name.
Takes an interface name or just interface suffix
and returns interface type and number separately.
:param interface: interface name or just suffix
:param ch_grp: if non-zero, ignore interface
name and return 'port-channel' grp
:returns: interface type like 'ethernet'
:returns: returns suffix to interface name | entailment |
def _host_notification(self, context, method, payload, host):
"""Notify the cfg agent that is handling the hosting device."""
LOG.debug('Notify Cisco cfg agent at %(host)s the message '
'%(method)s', {'host': host, 'method': method})
cctxt = self.client.prepare(server=host)
cctxt.cast(context, method, payload=payload) | Notify the cfg agent that is handling the hosting device. | entailment |
def _agent_notification(self, context, method, hosting_devices, operation):
"""Notify individual Cisco cfg agents."""
admin_context = context.is_admin and context or context.elevated()
for hosting_device in hosting_devices:
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, hosting_device['id'], admin_state_up=True,
schedule=True)
for agent in agents:
LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
'message %(method)s',
{'agent_type': agent.agent_type,
'topic': agent.topic,
'host': agent.host,
'method': method})
cctxt = self.client.prepare(server=agent.host)
cctxt.cast(context, method) | Notify individual Cisco cfg agents. | entailment |
def agent_updated(self, context, admin_state_up, host):
"""Updates cfg agent on <host> to enable or disable it."""
self._host_notification(context, 'agent_updated',
{'admin_state_up': admin_state_up}, host) | Updates cfg agent on <host> to enable or disable it. | entailment |
def hosting_devices_unassigned_from_cfg_agent(self, context, ids, host):
"""Notify cfg agent to no longer handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>.
"""
self._host_notification(context,
'hosting_devices_unassigned_from_cfg_agent',
{'hosting_device_ids': ids}, host) | Notify cfg agent to no longer handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>. | entailment |
def hosting_devices_assigned_to_cfg_agent(self, context, ids, host):
"""Notify cfg agent to now handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>.
"""
self._host_notification(context,
'hosting_devices_assigned_to_cfg_agent',
{'hosting_device_ids': ids}, host) | Notify cfg agent to now handle some hosting devices.
This notification relieves the cfg agent in <host> of responsibility
to monitor and configure hosting devices with id specified in <ids>. | entailment |
def hosting_devices_removed(self, context, hosting_data, deconfigure,
host):
"""Notify cfg agent that some hosting devices have been removed.
This notification informs the cfg agent in <host> that the
hosting devices in the <hosting_data> dictionary have been removed
from the hosting device pool. The <hosting_data> dictionary also
contains the ids of the affected logical resources for each hosting
devices::
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...]},
'fw': [id1, ...],
...},
...}
The <deconfigure> argument is True if any configurations for the
logical resources should be removed from the hosting devices
"""
if hosting_data:
self._host_notification(context, 'hosting_devices_removed',
{'hosting_data': hosting_data,
'deconfigure': deconfigure}, host) | Notify cfg agent that some hosting devices have been removed.
This notification informs the cfg agent in <host> that the
hosting devices in the <hosting_data> dictionary have been removed
from the hosting device pool. The <hosting_data> dictionary also
contains the ids of the affected logical resources for each hosting
devices::
{'hd_id1': {'routers': [id1, id2, ...],
'fw': [id1, ...],
...},
'hd_id2': {'routers': [id3, id4, ...]},
'fw': [id1, ...],
...},
...}
The <deconfigure> argument is True if any configurations for the
logical resources should be removed from the hosting devices | entailment |
def get_hosting_device_configuration(self, context, id):
"""Fetch configuration of hosting device with id.
The configuration agent should respond with the running config of
the hosting device.
"""
admin_context = context.is_admin and context or context.elevated()
agents = self._dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, [id], admin_state_up=True, schedule=True)
if agents:
cctxt = self.client.prepare(server=agents[0].host)
return cctxt.call(context, 'get_hosting_device_configuration',
payload={'hosting_device_id': id}) | Fetch configuration of hosting device with id.
The configuration agent should respond with the running config of
the hosting device. | entailment |
def store_policy(self, pol_id, policy):
"""Store the policy.
Policy is maintained as a dictionary of pol ID.
"""
if pol_id not in self.policies:
self.policies[pol_id] = policy
self.policy_cnt += 1 | Store the policy.
Policy is maintained as a dictionary of pol ID. | entailment |
def store_rule(self, rule_id, rule):
"""Store the rules.
Policy is maintained as a dictionary of Rule ID.
"""
if rule_id not in self.rules:
self.rules[rule_id] = rule
self.rule_cnt += 1 | Store the rules.
Policy is maintained as a dictionary of Rule ID. | entailment |
def delete_rule(self, rule_id):
"""Delete the specific Rule from dictionary indexed by rule id. """
if rule_id not in self.rules:
LOG.error("No Rule id present for deleting %s", rule_id)
return
del self.rules[rule_id]
self.rule_cnt -= 1 | Delete the specific Rule from dictionary indexed by rule id. | entailment |
def rule_update(self, rule_id, rule):
"""Update the rule. """
if rule_id not in self.rules:
LOG.error("Rule ID not present %s", rule_id)
return
self.rules[rule_id].update(rule) | Update the rule. | entailment |
def is_fw_present(self, fw_id):
"""Returns if firewall index by ID is present in dictionary. """
if self.fw_id is None or self.fw_id != fw_id:
return False
else:
return True | Returns if firewall index by ID is present in dictionary. | entailment |
def create_fw(self, proj_name, pol_id, fw_id, fw_name, fw_type, rtr_id):
"""Fills up the local attributes when FW is created. """
self.tenant_name = proj_name
self.fw_id = fw_id
self.fw_name = fw_name
self.fw_created = True
self.active_pol_id = pol_id
self.fw_type = fw_type
self.router_id = rtr_id | Fills up the local attributes when FW is created. | entailment |
def delete_fw(self, fw_id):
"""Deletes the FW local attributes. """
self.fw_id = None
self.fw_name = None
self.fw_created = False
self.active_pol_id = None | Deletes the FW local attributes. | entailment |
def delete_policy(self, pol_id):
"""Deletes the policy from the local dictionary. """
if pol_id not in self.policies:
LOG.error("Invalid policy %s", pol_id)
return
del self.policies[pol_id]
self.policy_cnt -= 1 | Deletes the policy from the local dictionary. | entailment |
def is_fw_complete(self):
"""This API returns the completion status of FW.
This returns True if a FW is created with a active policy that has
more than one rule associated with it and if a driver init is done
successfully.
"""
LOG.info("In fw_complete needed %(fw_created)s "
"%(active_policy_id)s %(is_fw_drvr_created)s "
"%(pol_present)s %(fw_type)s",
{'fw_created': self.fw_created,
'active_policy_id': self.active_pol_id,
'is_fw_drvr_created': self.is_fw_drvr_created(),
'pol_present': self.active_pol_id in self.policies,
'fw_type': self.fw_type})
if self.active_pol_id is not None:
LOG.info("In Drvr create needed %(len_policy)s %(one_rule)s",
{'len_policy':
len(self.policies[self.active_pol_id]['rule_dict']),
'one_rule':
self.one_rule_present(self.active_pol_id)})
return self.fw_created and self.active_pol_id and (
self.is_fw_drvr_created()) and self.fw_type and (
self.active_pol_id in self.policies) and (
len(self.policies[self.active_pol_id]['rule_dict'])) > 0 and (
self.one_rule_present(self.active_pol_id)) | This API returns the completion status of FW.
This returns True if a FW is created with a active policy that has
more than one rule associated with it and if a driver init is done
successfully. | entailment |
def one_rule_present(self, pol_id):
"""Returns if atleast one rule is present in the policy. """
pol_dict = self.policies[pol_id]
for rule in pol_dict['rule_dict']:
if self.is_rule_present(rule):
return True
return False | Returns if atleast one rule is present in the policy. | entailment |
def get_fw_dict(self):
"""This API creates a FW dictionary from the local attributes. """
fw_dict = {}
if self.fw_id is None:
return fw_dict
fw_dict = {'rules': {}, 'tenant_name': self.tenant_name,
'tenant_id': self.tenant_id, 'fw_id': self.fw_id,
'fw_name': self.fw_name,
'firewall_policy_id': self.active_pol_id,
'fw_type': self.fw_type, 'router_id': self.router_id}
# When Firewall and Policy are both deleted and the SM is doing a
# retry (maybe DCNM Out partition could not be deleted) during
# which without this check, it throws an exception since
# self.policies is empty. This is also an issue during restart.
if self.active_pol_id not in self.policies:
return fw_dict
pol_dict = self.policies[self.active_pol_id]
for rule in pol_dict['rule_dict']:
fw_dict['rules'][rule] = self.rules[rule]
return fw_dict | This API creates a FW dictionary from the local attributes. | entailment |
def update_fw_params(self, rtr_id=-1, fw_type=-1):
"""Updates the FW parameters. """
if rtr_id != -1:
self.router_id = rtr_id
if fw_type != -1:
self.fw_type = fw_type | Updates the FW parameters. | entailment |
def populate_cfg_dcnm(self, cfg, dcnm_obj):
"""This routine stores the DCNM object. """
if not self.fw_init:
return
self.dcnm_obj = dcnm_obj
self.fabric.store_dcnm(dcnm_obj)
self.populate_dcnm_obj(dcnm_obj) | This routine stores the DCNM object. | entailment |
def populate_event_queue(self, cfg, que_obj):
"""This routine is for storing the Event Queue obj. """
if not self.fw_init:
return
self.que_obj = que_obj
self.populate_event_que(que_obj) | This routine is for storing the Event Queue obj. | entailment |
def network_sub_create_notif(self, tenant_id, tenant_name, cidr):
"""Network create notification. """
if not self.fw_init:
return
self.network_create_notif(tenant_id, tenant_name, cidr) | Network create notification. | entailment |
def network_del_notif(self, tenant_id, tenant_name, net_id):
"""Network delete notification. """
if not self.fw_init:
return
self.network_delete_notif(tenant_id, tenant_name, net_id) | Network delete notification. | entailment |
def project_create_notif(self, tenant_id, tenant_name):
"""Tenant Create notification. """
if not self.fw_init:
return
self.os_helper.create_router('_'.join([fw_constants.TENANT_EDGE_RTR,
tenant_name]),
tenant_id, []) | Tenant Create notification. | entailment |
def project_delete_notif(self, tenant_id, tenant_name):
"""Tenant Delete notification. """
if not self.fw_init:
return
rtr_name = '_'.join([fw_constants.TENANT_EDGE_RTR, tenant_name])
self.os_helper.delete_router_by_name(rtr_name, tenant_id) | Tenant Delete notification. | entailment |
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict):
"""Prepares the Fabric and configures the device.
This routine calls the fabric class to prepare the fabric when
a firewall is created. It also calls the device manager to
configure the device. It updates the database with the final
result.
"""
is_fw_virt = self.is_device_virtual()
ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt,
fw_constants.RESULT_FW_CREATE_INIT)
if not ret:
LOG.error("Prepare Fabric failed")
return
else:
self.update_fw_db_final_result(fw_dict.get('fw_id'), (
fw_constants.RESULT_FW_CREATE_DONE))
ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
self.fwid_attr[tenant_id].fw_drvr_created(True)
self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS')
LOG.info("FW device create returned success for tenant %s",
tenant_id)
else:
LOG.error("FW device create returned failure for tenant %s",
tenant_id) | Prepares the Fabric and configures the device.
This routine calls the fabric class to prepare the fabric when
a firewall is created. It also calls the device manager to
configure the device. It updates the database with the final
result. | entailment |
def _create_fw_fab_dev(self, tenant_id, drvr_name, fw_dict):
"""This routine calls the Tenant Edge routine if FW Type is TE. """
if fw_dict.get('fw_type') == fw_constants.FW_TENANT_EDGE:
self._create_fw_fab_dev_te(tenant_id, drvr_name, fw_dict) | This routine calls the Tenant Edge routine if FW Type is TE. | entailment |
def _check_create_fw(self, tenant_id, drvr_name):
"""Creates the Firewall, if all conditions are met.
This function first checks if all the configuration are done
for a FW to be launched. After that it creates the FW entry in the
DB. After that, it calls the routine to prepare the fabric and
configure the device.
"""
if self.fwid_attr[tenant_id].is_fw_drvr_create_needed():
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
try:
with self.fwid_attr[tenant_id].mutex_lock:
ret = self.add_fw_db(fw_dict.get('fw_id'), fw_dict,
fw_constants.RESULT_FW_CREATE_INIT)
if not ret:
LOG.error("Adding FW DB failed for tenant %s",
tenant_id)
return
self._create_fw_fab_dev(tenant_id, drvr_name, fw_dict)
except Exception as exc:
LOG.error("Exception raised in create fw %s", str(exc)) | Creates the Firewall, if all conditions are met.
This function first checks if all the configuration are done
for a FW to be launched. After that it creates the FW entry in the
DB. After that, it calls the routine to prepare the fabric and
configure the device. | entailment |
def _delete_fw_fab_dev(self, tenant_id, drvr_name, fw_dict):
"""Deletes the Firewall.
This routine calls the fabric class to delete the fabric when
a firewall is deleted. It also calls the device manager to
unconfigure the device. It updates the database with the final
result.
"""
is_fw_virt = self.is_device_virtual()
if self.fwid_attr[tenant_id].is_fw_drvr_created():
ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if not ret:
LOG.error("Error in delete_fabric_fw device for tenant "
"%s", tenant_id)
return False
else:
self.fwid_attr[tenant_id].fw_drvr_created(False)
self.update_fw_db_dev_status(fw_dict.get('fw_id'), '')
ret = self.fabric.delete_fabric_fw(tenant_id, fw_dict, is_fw_virt,
fw_constants.RESULT_FW_DELETE_INIT)
if not ret:
LOG.error("Error in delete_fabric_fw for tenant %s",
tenant_id)
return False
self.update_fw_db_final_result(fw_dict.get('fw_id'), (
fw_constants.RESULT_FW_DELETE_DONE))
self.delete_fw(fw_dict.get('fw_id'))
return True | Deletes the Firewall.
This routine calls the fabric class to delete the fabric when
a firewall is deleted. It also calls the device manager to
unconfigure the device. It updates the database with the final
result. | entailment |
def _check_delete_fw(self, tenant_id, drvr_name):
"""Deletes the Firewall, if all conditioms are met.
This function after modifying the DB with delete operation status,
calls the routine to remove the fabric cfg from DB and unconfigure
the device.
"""
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
ret = False
try:
with self.fwid_attr[tenant_id].mutex_lock:
self.update_fw_db_final_result(fw_dict.get('fw_id'), (
fw_constants.RESULT_FW_DELETE_INIT))
ret = self._delete_fw_fab_dev(tenant_id, drvr_name, fw_dict)
except Exception as exc:
LOG.error("Exception raised in delete fw %s", str(exc))
return ret | Deletes the Firewall, if all conditioms are met.
This function after modifying the DB with delete operation status,
calls the routine to remove the fabric cfg from DB and unconfigure
the device. | entailment |
def _check_update_fw(self, tenant_id, drvr_name):
"""Update the Firewall config by calling the driver.
This function calls the device manager routine to update the device
with modified FW cfg.
"""
if self.fwid_attr[tenant_id].is_fw_complete():
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
self.modify_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict) | Update the Firewall config by calling the driver.
This function calls the device manager routine to update the device
with modified FW cfg. | entailment |
def _fw_create(self, drvr_name, data, cache):
"""Firewall create routine.
This function updates its local cache with FW parameters.
It checks if local cache has information about the Policy
associated with the FW. If not, it means a restart has happened.
It retrieves the policy associated with the FW by calling
Openstack API's and calls t he policy create internal routine.
"""
fw = data.get('firewall')
tenant_id = fw.get('tenant_id')
fw_name = fw.get('name')
fw_id = fw.get('id')
fw_pol_id = fw.get('firewall_policy_id')
admin_state = fw.get('admin_state_up')
rtr_id = None
if 'router_ids' in fw and len(fw.get('router_ids')) != 0:
rtr_id = fw.get('router_ids')[0]
if not admin_state:
LOG.debug("Admin state disabled")
return
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
rtr_name = '_'.join([fw_constants.TENANT_EDGE_RTR, name])
fw_rtr_name = self.os_helper.get_rtr_name(rtr_id)
fw_type = None
if fw_rtr_name == rtr_name:
fw_type = fw_constants.FW_TENANT_EDGE
if tenant_id not in self.fwid_attr:
self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
tenant_obj = self.fwid_attr[tenant_id]
tenant_obj.create_fw(name, fw_pol_id, fw_id, fw_name, fw_type, rtr_id)
self.tenant_db.store_fw_tenant(fw_id, tenant_id)
if not cache:
self._check_create_fw(tenant_id, drvr_name)
if fw_pol_id is not None and not (
tenant_obj.is_policy_present(fw_pol_id)):
pol_data = self.os_helper.get_fw_policy(fw_pol_id)
if pol_data is not None:
self.fw_policy_create(pol_data, cache=cache) | Firewall create routine.
This function updates its local cache with FW parameters.
It checks if local cache has information about the Policy
associated with the FW. If not, it means a restart has happened.
It retrieves the policy associated with the FW by calling
Openstack API's and calls t he policy create internal routine. | entailment |
def fw_create(self, data, fw_name=None, cache=False):
"""Top level FW create function. """
LOG.debug("FW create %s", data)
try:
self._fw_create(fw_name, data, cache)
except Exception as exc:
LOG.error("Exception in fw_create %s", str(exc)) | Top level FW create function. | entailment |
def _fw_update(self, drvr_name, data):
"""Update routine for the Firewall.
Check if FW is already cfgd using the below function
if self.fwid_attr[tenant_id].is_fw_complete() or
is_fw_drvr_create_needed():
The above two functions will take care of whether FW is already
cfgd or about to be cfgd in case of error.
If yes, this may be a change in policies attached to FW.
If no, do a check, create after storing the parameters like
rtr_id.
"""
fw = data.get('firewall')
tenant_id = fw.get('tenant_id')
if self.fwid_attr[tenant_id].is_fw_complete() or \
self.fwid_attr[tenant_id].is_fw_drvr_create_needed():
prev_info_complete = True
else:
prev_info_complete = False
tenant_obj = self.fwid_attr[tenant_id]
if 'router_ids' in fw and len(fw.get('router_ids')) != 0:
rtr_id = fw.get('router_ids')[0]
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
rtr_name = '_'.join([fw_constants.TENANT_EDGE_RTR, name])
fw_rtr_name = self.os_helper.get_rtr_name(rtr_id)
fw_type = None
if fw_rtr_name == rtr_name:
fw_type = fw_constants.FW_TENANT_EDGE
tenant_obj.update_fw_params(rtr_id, fw_type)
if not prev_info_complete:
self._check_create_fw(tenant_id, drvr_name) | Update routine for the Firewall.
Check if FW is already cfgd using the below function
if self.fwid_attr[tenant_id].is_fw_complete() or
is_fw_drvr_create_needed():
The above two functions will take care of whether FW is already
cfgd or about to be cfgd in case of error.
If yes, this may be a change in policies attached to FW.
If no, do a check, create after storing the parameters like
rtr_id. | entailment |
def fw_update(self, data, fw_name=None):
"""Top level FW update function. """
LOG.debug("FW Update %s", data)
self._fw_update(fw_name, data) | Top level FW update function. | entailment |
def _fw_delete(self, drvr_name, data):
"""Firewall Delete routine.
This function calls routines to remove FW from fabric and device.
It also updates its local cache.
"""
fw_id = data.get('firewall_id')
tenant_id = self.tenant_db.get_fw_tenant(fw_id)
if tenant_id not in self.fwid_attr:
LOG.error("Invalid tenant id for FW delete %s", tenant_id)
return
tenant_obj = self.fwid_attr[tenant_id]
ret = self._check_delete_fw(tenant_id, drvr_name)
if ret:
tenant_obj.delete_fw(fw_id)
self.tenant_db.del_fw_tenant(fw_id) | Firewall Delete routine.
This function calls routines to remove FW from fabric and device.
It also updates its local cache. | entailment |
def _fw_rule_decode_store(self, data):
"""Misc function to decode the firewall rule from Openstack. """
fw_rule = data.get('firewall_rule')
rule = {'protocol': fw_rule.get('protocol'),
'source_ip_address': fw_rule.get('source_ip_address'),
'destination_ip_address': fw_rule.get(
'destination_ip_address'),
'source_port': fw_rule.get('source_port'),
'destination_port': fw_rule.get('destination_port'),
'action': fw_rule.get('action'),
'enabled': fw_rule.get('enabled'),
'name': fw_rule.get('name')}
return rule | Misc function to decode the firewall rule from Openstack. | entailment |
def _fw_rule_create(self, drvr_name, data, cache):
"""Firewall Rule create routine.
This function updates its local cache with rule parameters.
It checks if local cache has information about the Policy
associated with the rule. If not, it means a restart has happened.
It retrieves the policy associated with the FW by calling
Openstack API's and calls t he policy create internal routine.
"""
tenant_id = data.get('firewall_rule').get('tenant_id')
fw_rule = data.get('firewall_rule')
rule = self._fw_rule_decode_store(data)
fw_pol_id = fw_rule.get('firewall_policy_id')
rule_id = fw_rule.get('id')
if tenant_id not in self.fwid_attr:
self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
self.fwid_attr[tenant_id].store_rule(rule_id, rule)
if not cache:
self._check_create_fw(tenant_id, drvr_name)
self.tenant_db.store_rule_tenant(rule_id, tenant_id)
if fw_pol_id is not None and not (
self.fwid_attr[tenant_id].is_policy_present(fw_pol_id)):
pol_data = self.os_helper.get_fw_policy(fw_pol_id)
if pol_data is not None:
self.fw_policy_create(pol_data, cache=cache) | Firewall Rule create routine.
This function updates its local cache with rule parameters.
It checks if local cache has information about the Policy
associated with the rule. If not, it means a restart has happened.
It retrieves the policy associated with the FW by calling
Openstack API's and calls t he policy create internal routine. | entailment |
def fw_rule_create(self, data, fw_name=None, cache=False):
"""Top level rule creation routine. """
LOG.debug("FW Rule create %s", data)
self._fw_rule_create(fw_name, data, cache) | Top level rule creation routine. | entailment |
def _fw_rule_delete(self, drvr_name, data):
"""Function that updates its local cache after a rule is deleted. """
rule_id = data.get('firewall_rule_id')
tenant_id = self.tenant_db.get_rule_tenant(rule_id)
if tenant_id not in self.fwid_attr:
LOG.error("Invalid tenant id for FW delete %s", tenant_id)
return
tenant_obj = self.fwid_attr[tenant_id]
# Guess actual FW/policy need not be deleted if this is the active
# rule, Openstack does not allow it to be deleted
tenant_obj.delete_rule(rule_id)
self.tenant_db.del_rule_tenant(rule_id) | Function that updates its local cache after a rule is deleted. | entailment |
def fw_rule_delete(self, data, fw_name=None):
"""Top level rule delete function. """
LOG.debug("FW Rule delete %s", data)
self._fw_rule_delete(fw_name, data) | Top level rule delete function. | entailment |
def _fw_rule_update(self, drvr_name, data):
"""Firewall Rule update routine.
Function to decode the updated rules and call routines that
in turn calls the device routines to update rules.
"""
LOG.debug("FW Update %s", data)
tenant_id = data.get('firewall_rule').get('tenant_id')
fw_rule = data.get('firewall_rule')
rule = self._fw_rule_decode_store(data)
rule_id = fw_rule.get('id')
if tenant_id not in self.fwid_attr or not (
self.fwid_attr[tenant_id].is_rule_present(rule_id)):
LOG.error("Incorrect update info for tenant %s", tenant_id)
return
self.fwid_attr[tenant_id].rule_update(rule_id, rule)
self._check_update_fw(tenant_id, drvr_name) | Firewall Rule update routine.
Function to decode the updated rules and call routines that
in turn calls the device routines to update rules. | entailment |
def fw_rule_update(self, data, fw_name=None):
"""Top level rule update routine. """
LOG.debug("FW Update Debug")
self._fw_rule_update(fw_name, data) | Top level rule update routine. | entailment |
def _fw_policy_delete(self, drvr_name, data):
"""Routine to delete the policy from local cache. """
policy_id = data.get('firewall_policy_id')
tenant_id = self.tenant_db.get_policy_tenant(policy_id)
if tenant_id not in self.fwid_attr:
LOG.error("Invalid tenant id for FW delete %s", tenant_id)
return
tenant_obj = self.fwid_attr[tenant_id]
# Guess actual FW need not be deleted since if this is the active
# policy, Openstack does not allow it to be deleted
tenant_obj.delete_policy(policy_id)
self.tenant_db.del_policy_tenant(policy_id) | Routine to delete the policy from local cache. | entailment |
def fw_policy_delete(self, data, fw_name=None):
"""Top level policy delete routine. """
LOG.debug("FW Policy Debug")
self._fw_policy_delete(fw_name, data) | Top level policy delete routine. | entailment |
def _fw_policy_create(self, drvr_name, data, cache):
"""Firewall Policy create routine.
This function updates its local cache with policy parameters.
It checks if local cache has information about the rules
associated with the policy. If not, it means a restart has
happened. It retrieves the rules associated with the policy by
calling Openstack API's and calls the rule create internal routine.
"""
policy = {}
fw_policy = data.get('firewall_policy')
tenant_id = fw_policy.get('tenant_id')
LOG.info("Creating policy for tenant %s", tenant_id)
policy_id = fw_policy.get('id')
policy_name = fw_policy.get('name')
pol_rule_dict = fw_policy.get('firewall_rules')
if tenant_id not in self.fwid_attr:
self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
policy['name'] = policy_name
policy['rule_dict'] = pol_rule_dict
self.fwid_attr[tenant_id].store_policy(policy_id, policy)
if not cache:
self._check_create_fw(tenant_id, drvr_name)
self.tenant_db.store_policy_tenant(policy_id, tenant_id)
for rule in pol_rule_dict:
rule_id = rule
if not self.fwid_attr[tenant_id].is_rule_present(rule_id):
rule_data = self.os_helper.get_fw_rule(rule_id)
if rule_data is not None:
self.fw_rule_create(rule_data, cache=cache) | Firewall Policy create routine.
This function updates its local cache with policy parameters.
It checks if local cache has information about the rules
associated with the policy. If not, it means a restart has
happened. It retrieves the rules associated with the policy by
calling Openstack API's and calls the rule create internal routine. | entailment |
def fw_policy_create(self, data, fw_name=None, cache=False):
"""Top level policy create routine. """
LOG.debug("FW Policy Debug")
self._fw_policy_create(fw_name, data, cache) | Top level policy create routine. | entailment |
def convert_fwdb_event_msg(self, rule, tenant_id, rule_id, policy_id):
"""Convert the Firewall DB to a event message format.
From inputs from DB, this will create a FW rule dictionary that
resembles the actual data from Openstack when a rule is created.
This is usually called after restart, in order to populate local
cache.
"""
rule.update({'tenant_id': tenant_id, 'id': rule_id,
'firewall_policy_id': policy_id})
fw_rule_data = {'firewall_rule': rule}
return fw_rule_data | Convert the Firewall DB to a event message format.
From inputs from DB, this will create a FW rule dictionary that
resembles the actual data from Openstack when a rule is created.
This is usually called after restart, in order to populate local
cache. | entailment |
def convert_fwdb(self, tenant_id, name, policy_id, fw_id):
"""Convert the Firewall DB to a query response.
From FWDB inputs, this will create a FW message that resembles the
actual data from Openstack, when a query for FW is done.
"""
fw_dict = {'tenant_id': tenant_id, 'name': name, 'id': fw_id,
'firewall_policy_id': policy_id,
'admin_state_up': True}
fw_data = {'firewall': fw_dict}
return fw_data | Convert the Firewall DB to a query response.
From FWDB inputs, this will create a FW message that resembles the
actual data from Openstack, when a query for FW is done. | entailment |
def populate_local_cache(self):
"""This populates the local cache after reading the Database.
It calls the appropriate rule create, fw create routines.
It doesn't actually call the routine to prepare the fabric or cfg the
device since it will be handled by retry module.
"""
fw_dict = self.get_all_fw_db()
LOG.info("Populating FW Mgr Local Cache")
for fw_id in fw_dict:
fw_data = fw_dict.get(fw_id)
tenant_id = fw_data.get('tenant_id')
rule_dict = fw_data.get('rules').get('rules')
policy_id = fw_data.get('rules').get('firewall_policy_id')
for rule in rule_dict:
fw_evt_data = self.convert_fwdb_event_msg(rule_dict.get(rule),
tenant_id, rule,
policy_id)
LOG.info("Populating Rules for tenant %s", tenant_id)
self.fw_rule_create(fw_evt_data, cache=True)
fw_os_data = self.os_helper.get_fw(fw_id)
# If enabler is stopped and FW is deleted, then the above routine
# will fail.
if fw_os_data is None:
fw_os_data = self.convert_fwdb(tenant_id, fw_data.get('name'),
policy_id, fw_id)
LOG.info("Populating FW for tenant %s", tenant_id)
self.fw_create(fw_os_data, cache=True)
if fw_data.get('device_status') == 'SUCCESS':
self.fwid_attr[tenant_id].fw_drvr_created(True)
else:
self.fwid_attr[tenant_id].fw_drvr_created(False)
return fw_dict | This populates the local cache after reading the Database.
It calls the appropriate rule create, fw create routines.
It doesn't actually call the routine to prepare the fabric or cfg the
device since it will be handled by retry module. | entailment |
def retry_failure_fab_dev_create(self, tenant_id, fw_data, fw_dict):
"""This module calls routine in fabric to retry the failure cases.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
"""
result = fw_data.get('result').split('(')[0]
is_fw_virt = self.is_device_virtual()
# Fabric portion
if result == fw_constants.RESULT_FW_CREATE_INIT:
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
ret = self.fabric.retry_failure(tenant_id, name, fw_dict,
is_fw_virt, result)
if not ret:
LOG.error("Retry failure returned fail for tenant %s",
tenant_id)
return
else:
result = fw_constants.RESULT_FW_CREATE_DONE
self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
# Device portion
if result == fw_constants.RESULT_FW_CREATE_DONE:
if fw_data.get('device_status') != 'SUCCESS':
ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
self.fwid_attr[tenant_id].fw_drvr_created(True)
self.update_fw_db_dev_status(fw_dict.get('fw_id'),
'SUCCESS')
LOG.info("Retry failue return success for create"
" tenant %s", tenant_id) | This module calls routine in fabric to retry the failure cases.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device. | entailment |
def retry_failure_fab_dev_delete(self, tenant_id, fw_data, fw_dict):
"""Retry the failure cases for delete.
This module calls routine in fabric to retry the failure cases for
delete.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device.
"""
result = fw_data.get('result').split('(')[0]
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
fw_dict['tenant_name'] = name
is_fw_virt = self.is_device_virtual()
if result == fw_constants.RESULT_FW_DELETE_INIT:
if self.fwid_attr[tenant_id].is_fw_drvr_created():
ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'),
fw_dict)
if ret:
# Device portion
self.update_fw_db_dev_status(fw_dict.get('fw_id'),
'')
self.fwid_attr[tenant_id].fw_drvr_created(False)
LOG.info("Retry failue dev return success for delete"
" tenant %s", tenant_id)
else:
return
name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id)
ret = self.fabric.retry_failure(tenant_id, name, fw_dict,
is_fw_virt, result)
if not ret:
LOG.error("Retry failure returned fail for tenant %s",
tenant_id)
return
result = fw_constants.RESULT_FW_DELETE_DONE
self.update_fw_db_final_result(fw_dict.get('fw_id'), result)
self.delete_fw(fw_dict.get('fw_id'))
self.fwid_attr[tenant_id].delete_fw(fw_dict.get('fw_id'))
self.tenant_db.del_fw_tenant(fw_dict.get('fw_id')) | Retry the failure cases for delete.
This module calls routine in fabric to retry the failure cases for
delete.
If device is not successfully cfg/uncfg, it calls the device manager
routine to cfg/uncfg the device. | entailment |
def fw_retry_failures_create(self):
"""This module is called for retrying the create cases. """
for tenant_id in self.fwid_attr:
try:
with self.fwid_attr[tenant_id].mutex_lock:
if self.fwid_attr[tenant_id].is_fw_drvr_create_needed():
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
if fw_dict:
fw_obj, fw_data = self.get_fw(fw_dict.get('fw_id'))
self.retry_failure_fab_dev_create(tenant_id,
fw_data,
fw_dict)
else:
LOG.error("FW data not found for tenant %s",
tenant_id)
except Exception as exc:
LOG.error("Exception in retry failure create %s",
str(exc)) | This module is called for retrying the create cases. | entailment |
def fill_fw_dict_from_db(self, fw_data):
"""
This routine is called to create a local fw_dict with data from DB.
"""
rule_dict = fw_data.get('rules').get('rules')
fw_dict = {'fw_id': fw_data.get('fw_id'),
'fw_name': fw_data.get('name'),
'firewall_policy_id': fw_data.get('firewall_policy_id'),
'fw_type': fw_data.get('fw_type'),
'router_id': fw_data.get('router_id'), 'rules': {}}
for rule in rule_dict:
fw_dict['rules'][rule] = rule_dict.get(rule)
return fw_dict | This routine is called to create a local fw_dict with data from DB. | entailment |
def fw_retry_failures_delete(self):
"""This routine is called for retrying the delete cases. """
for tenant_id in self.fwid_attr:
try:
with self.fwid_attr[tenant_id].mutex_lock:
# For both create and delete case
fw_data = self.get_fw_by_tenant_id(tenant_id)
if fw_data is None:
LOG.info("No FW for tenant %s", tenant_id)
continue
result = fw_data.get('result').split('(')[0]
if result == fw_constants.RESULT_FW_DELETE_INIT:
fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
# This means a restart has happened before the FW is
# completely deleted
if not fw_dict:
# Need to fill fw_dict from fw_data
fw_dict = self.fill_fw_dict_from_db(fw_data)
self.retry_failure_fab_dev_delete(tenant_id, fw_data,
fw_dict)
except Exception as exc:
LOG.error("Exception in retry failure delete %s",
str(exc)) | This routine is called for retrying the delete cases. | entailment |
def fw_retry_failures(self):
"""Top level retry routine called. """
if not self.fw_init:
return
try:
self.fw_retry_failures_create()
self.fw_retry_failures_delete()
except Exception as exc:
LOG.error("Exception in retry failures %s", str(exc)) | Top level retry routine called. | entailment |
def _find_starts(self, linespec):
"""
Finds the start points.
Start points matching the linespec regex are returned as list in the
following format:
[(item, index), (item, index).....
"""
linespec += ".*"
start_points = []
for item in self._indent_list:
match = re.search(linespec, item[1])
if match:
entry = (item, self._indent_list.index(item))
start_points.append(entry)
return start_points | Finds the start points.
Start points matching the linespec regex are returned as list in the
following format:
[(item, index), (item, index)..... | entailment |
def find_lines(self, linespec):
"""Find lines that match the linespec regex."""
res = []
linespec += ".*"
for line in self.cfg:
match = re.search(linespec, line)
if match:
res.append(match.group(0))
return res | Find lines that match the linespec regex. | entailment |
def find_objects(self, linespec):
"""Find lines that match the linespec regex.
:param linespec: regular expression of line to match
:return: list of LineItem objects
"""
# Note(asr1kteam): In this code we are only adding children one-level
# deep to a given parent (linespec), as that satisfies the IOS conf
# parsing.
# Note(asr1kteam): Not tested with tabs in the config. Currently used
# with IOS config where we haven't seen tabs, but may be needed for a
# more general case.
res = []
self._build_indent_based_list()
for item, index in self._find_starts(linespec):
parent = LineItem(item[1])
next_ident_level = self._find_next_indent_level(index)
if next_ident_level:
# We start iterating from the next element
for item in self._indent_list[(index + 1):]:
if item[0] == next_ident_level:
parent.add_children(LineItem(item[1]))
elif item[0] > next_ident_level: # We skip higher indent
continue
else: # Indent level is same or lesser than item
break
res.append(parent)
return res | Find lines that match the linespec regex.
:param linespec: regular expression of line to match
:return: list of LineItem objects | entailment |
def find_children(self, linespec):
"""Find lines and immediate children that match the linespec regex.
:param linespec: regular expression of line to match
:returns: list of lines. These correspond to the lines that were
matched and their immediate children
"""
res = []
for parent in self.find_objects(linespec):
res.append(parent.line)
res.extend([child.line for child in parent.children])
return res | Find lines and immediate children that match the linespec regex.
:param linespec: regular expression of line to match
:returns: list of lines. These correspond to the lines that were
matched and their immediate children | entailment |
def enable_lldp(self, port_name, is_ncb=True, is_nb=False):
"""Function to enable LLDP on the interface. """
if is_ncb:
self.run_lldptool(["-L", "-i", port_name, "-g", "ncb",
"adminStatus=rxtx"])
if is_nb:
self.run_lldptool(["-L", "-i", port_name, "-g", "nb",
"adminStatus=rxtx"]) | Function to enable LLDP on the interface. | entailment |
def enable_lldp(self):
"""Function to enable LLDP on the interface. """
if self.is_ncb:
self.run_lldptool(["-L", "-i", self.port_name, "-g", "ncb",
"adminStatus=rxtx"])
if self.is_nb:
self.run_lldptool(["-L", "-i", self.port_name, "-g", "nb",
"adminStatus=rxtx"]) | Function to enable LLDP on the interface. | entailment |
def enable_evb(self):
"""Function to enable EVB on the interface. """
if self.is_ncb:
self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb",
"-V", "evb", "enableTx=yes"])
ret = self.enable_gpid()
return ret
else:
LOG.error("EVB cannot be set on NB")
return False | Function to enable EVB on the interface. | entailment |
def enable_gpid(self):
"""Function to enable Group ID on the interface.
This is needed to use the MAC, GID, VID Filter.
"""
if self.is_ncb:
self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb",
"-V", "evb", "-c", "evbgpid=yes"])
return True
else:
LOG.error("GPID cannot be set on NB")
return False | Function to enable Group ID on the interface.
This is needed to use the MAC, GID, VID Filter. | entailment |
def _vdp_refrsh_hndlr(self):
"""Periodic refresh of vNIC events to VDP.
VDP daemon itself has keepalives. This is needed on top of it
to keep Orchestrator like OpenStack, VDP daemon and the physical
switch in sync.
"""
LOG.debug("Refresh handler")
try:
if not self.vdp_vif_map:
LOG.debug("vdp_vif_map not created, returning")
return
vdp_vif_map = dict.copy(self.vdp_vif_map)
oui_vif_map = dict.copy(self.oui_vif_map)
for key in six.iterkeys(vdp_vif_map):
lvdp_dict = vdp_vif_map.get(key)
loui_dict = oui_vif_map.get(key)
if not lvdp_dict:
return
if not loui_dict:
oui_id = ""
oui_data = ""
else:
oui_id = loui_dict.get('oui_id')
oui_data = loui_dict.get('oui_data')
with self.mutex_lock:
if key in self.vdp_vif_map:
LOG.debug("Sending Refresh for VSI %s", lvdp_dict)
vdp_vlan, fail_reason = self.send_vdp_assoc(
vsiid=lvdp_dict.get('vsiid'),
mgrid=lvdp_dict.get('mgrid'),
typeid=lvdp_dict.get('typeid'),
typeid_ver=lvdp_dict.get('typeid_ver'),
vsiid_frmt=lvdp_dict.get('vsiid_frmt'),
filter_frmt=lvdp_dict.get('filter_frmt'),
gid=lvdp_dict.get('gid'),
mac=lvdp_dict.get('mac'),
vlan=0, oui_id=oui_id, oui_data=oui_data,
sw_resp=True)
# check validity.
if not utils.is_valid_vlan_tag(vdp_vlan):
LOG.error("Returned vlan %(vlan)s is invalid.",
{'vlan': vdp_vlan})
# Need to invoke CB. So no return here.
vdp_vlan = 0
exist_vdp_vlan = lvdp_dict.get('vdp_vlan')
exist_fail_reason = lvdp_dict.get('fail_reason')
callback_count = lvdp_dict.get('callback_count')
# Condition will be hit only during error cases when switch
# reloads or when compute reloads
if vdp_vlan != exist_vdp_vlan or (
fail_reason != exist_fail_reason or
callback_count > vdp_const.CALLBACK_THRESHOLD):
# Invoke the CB Function
cb_fn = lvdp_dict.get('vsw_cb_fn')
cb_data = lvdp_dict.get('vsw_cb_data')
if cb_fn:
cb_fn(cb_data, vdp_vlan, fail_reason)
lvdp_dict['vdp_vlan'] = vdp_vlan
lvdp_dict['fail_reason'] = fail_reason
lvdp_dict['callback_count'] = 0
else:
lvdp_dict['callback_count'] += 1
except Exception as e:
LOG.error("Exception in Refrsh %s", str(e)) | Periodic refresh of vNIC events to VDP.
VDP daemon itself has keepalives. This is needed on top of it
to keep Orchestrator like OpenStack, VDP daemon and the physical
switch in sync. | entailment |
def run_lldptool(self, args):
"""Function for invoking the lldptool utility. """
full_args = ['lldptool'] + args
try:
utils.execute(full_args, root_helper=self.root_helper)
except Exception as e:
LOG.error("Unable to execute %(cmd)s. "
"Exception: %(exception)s",
{'cmd': full_args, 'exception': e}) | Function for invoking the lldptool utility. | entailment |
def store_oui(self, port_uuid, oui_type, oui_data):
"""Function for storing the OUI.
param uuid: UUID of the vNIC
param oui_type: OUI ID
param oui_data: OUI Opaque Data
"""
self.oui_vif_map[port_uuid] = {'oui_id': oui_type,
'oui_data': oui_data} | Function for storing the OUI.
param uuid: UUID of the vNIC
param oui_type: OUI ID
param oui_data: OUI Opaque Data | entailment |
def store_vdp_vsi(self, port_uuid, mgrid, typeid, typeid_ver,
vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
new_network, reply, oui_id, oui_data, vsw_cb_fn,
vsw_cb_data, reason):
"""Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param new_network: Is this the first vNIC of this network
:param reply: Response from the switch
:param oui_id: OUI Type
:param oui_data: OUI Data
:param vsw_cb_fn: Callback function from the app.
:param vsw_cb_data: Callback data for the app.
:param reason: Failure Reason
"""
if port_uuid in self.vdp_vif_map:
LOG.debug("Not Storing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': mac, 'uuid': vsiid})
if new_network:
vdp_vlan = reply
else:
vdp_vlan = vlan
vdp_dict = {'vdp_vlan': vdp_vlan,
'mgrid': mgrid,
'typeid': typeid,
'typeid_ver': typeid_ver,
'vsiid_frmt': vsiid_frmt,
'vsiid': vsiid,
'filter_frmt': filter_frmt,
'mac': mac,
'gid': gid,
'vsw_cb_fn': vsw_cb_fn,
'vsw_cb_data': vsw_cb_data,
'fail_reason': reason,
'callback_count': 0}
self.vdp_vif_map[port_uuid] = vdp_dict
LOG.debug("Storing VDP VSI MAC %(mac)s UUID %(uuid)s VDP VLAN "
"%(vlan)s", {'mac': mac, 'uuid': vsiid, 'vlan': vdp_vlan})
if oui_id:
self.store_oui(port_uuid, oui_id, oui_data) | Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param new_network: Is this the first vNIC of this network
:param reply: Response from the switch
:param oui_id: OUI Type
:param oui_data: OUI Data
:param vsw_cb_fn: Callback function from the app.
:param vsw_cb_data: Callback data for the app.
:param reason: Failure Reason | entailment |
def clear_oui(self, port_uuid):
"""Clears the OUI specific info.
:param uuid: vNIC UUID
Currently only one OUI per VSI fixme(padkrish)
"""
if port_uuid in self.oui_vif_map:
del self.oui_vif_map[port_uuid]
else:
LOG.debug("OUI does not exist") | Clears the OUI specific info.
:param uuid: vNIC UUID
Currently only one OUI per VSI fixme(padkrish) | entailment |
def clear_vdp_vsi(self, port_uuid):
"""Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
"""
try:
LOG.debug("Clearing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': self.vdp_vif_map[port_uuid].get('mac'),
'uuid': self.vdp_vif_map[port_uuid].get('vsiid')})
del self.vdp_vif_map[port_uuid]
except Exception:
LOG.error("VSI does not exist")
self.clear_oui(port_uuid) | Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID | entailment |
def gen_cisco_vdp_oui(self, oui_id, oui_data):
"""Cisco specific handler for constructing OUI arguments. """
oui_list = []
vm_name = oui_data.get('vm_name')
if vm_name is not None:
oui_str = "oui=%s," % oui_id
oui_name_str = oui_str + "vm_name=" + vm_name
oui_list.append(oui_name_str)
ip_addr = oui_data.get('ip_addr')
if ip_addr is not None:
oui_str = "oui=%s," % oui_id
ip_addr_str = oui_str + "ipv4_addr=" + ip_addr
oui_list.append(ip_addr_str)
vm_uuid = oui_data.get('vm_uuid')
if vm_uuid is not None:
oui_str = "oui=%s," % oui_id
vm_uuid_str = oui_str + "vm_uuid=" + vm_uuid
oui_list.append(vm_uuid_str)
return oui_list | Cisco specific handler for constructing OUI arguments. | entailment |
def gen_oui_str(self, oui_list):
"""Generate the OUI string for vdptool. """
oui_str = []
for oui in oui_list:
oui_str.append('-c')
oui_str.append(oui)
return oui_str | Generate the OUI string for vdptool. | entailment |
def construct_vdp_dict(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
vsiid, filter_frmt, gid, mac, vlan, oui_id,
oui_data):
"""Constructs the VDP Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:return vdp_keyword_str: Dictionary of VDP arguments and values
"""
vdp_keyword_str = {}
if mgrid is None:
mgrid = self.vdp_opts.get('mgrid')
mgrid_str = "mgrid2=%s" % mgrid
if typeid is None:
typeid = self.vdp_opts.get('typeid')
typeid_str = "typeid=%s" % typeid
if typeid_ver is None:
typeid_ver = self.vdp_opts.get('typeidver')
typeid_ver_str = "typeidver=%s" % typeid_ver
if int(vsiid_frmt) == int(self.vdp_opts.get('vsiidfrmt')):
vsiid_str = "uuid=%s" % vsiid
else:
# Only format supported for now
LOG.error("Unsupported VSIID Format1")
return vdp_keyword_str
if vlan == constants.INVALID_VLAN:
vlan = 0
if int(filter_frmt) == vdp_const.VDP_FILTER_GIDMACVID:
if not mac or gid == 0:
LOG.error("Incorrect Filter Format Specified")
return vdp_keyword_str
else:
f = "filter=%s-%s-%s"
filter_str = f % (vlan, mac, gid)
elif int(filter_frmt) == vdp_const.VDP_FILTER_GIDVID:
if gid == 0:
LOG.error("NULL GID Specified")
return vdp_keyword_str
else:
filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid
elif int(filter_frmt) == vdp_const.VDP_FILTER_MACVID:
if not mac:
LOG.error("NULL MAC Specified")
return vdp_keyword_str
else:
filter_str = "filter=" + '%d' % vlan + "-" + mac
elif int(filter_frmt) == vdp_const.VDP_FILTER_VID:
filter_str = "filter=" + '%d' % vlan
else:
LOG.error("Incorrect Filter Format Specified")
return vdp_keyword_str
oui_list = []
if oui_id is not None and oui_data is not None:
if oui_id is 'cisco':
oui_list = self.gen_cisco_vdp_oui(oui_id, oui_data)
mode_str = "mode=" + mode
vdp_keyword_str = dict(mode=mode_str, mgrid=mgrid_str,
typeid=typeid_str, typeid_ver=typeid_ver_str,
vsiid=vsiid_str, filter=filter_str,
oui_list=oui_list)
return vdp_keyword_str | Constructs the VDP Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:return vdp_keyword_str: Dictionary of VDP arguments and values | entailment |
def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
vsiid, filter_frmt, gid, mac, vlan, oui_id,
oui_data):
"""Constructs and Sends the VDP Query Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: Reply from vdptool
"""
if not self.is_ncb:
LOG.error("EVB cannot be set on NB")
return
vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
typeid_ver, vsiid_frmt, vsiid,
filter_frmt, gid, mac, vlan,
None, None)
if len(vdp_key_str) == 0:
LOG.error("NULL List")
return
reply = self.run_vdptool(["-t", "-i", self.port_name, "-R", "-V", mode,
"-c", vdp_key_str['mode'],
"-c", vdp_key_str['mgrid'],
"-c", vdp_key_str['typeid'],
"-c", vdp_key_str['typeid_ver'],
"-c", vdp_key_str['vsiid']])
return reply | Constructs and Sends the VDP Query Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: Reply from vdptool | entailment |
def send_vdp_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data,
sw_resp):
"""Constructs and Sends the VDP Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: Reply from vdptool
"""
if not self.is_ncb:
LOG.error("EVB cannot be set on NB")
return
vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
typeid_ver, vsiid_frmt, vsiid,
filter_frmt, gid, mac, vlan,
oui_id, oui_data)
if len(vdp_key_str) == 0:
LOG.error("NULL List")
return
oui_cmd_str = self.gen_oui_str(vdp_key_str['oui_list'])
if sw_resp:
# If filter is not VID and if VLAN is 0, Query for the TLV first,
# if found VDP will return the VLAN. Add support for this once
# vdptool has the support for querying exact VSI filters
# fixme(padkrish)
reply = self.run_vdptool(["-T", "-i", self.port_name, "-W",
"-V", mode, "-c", vdp_key_str['mode'],
"-c", vdp_key_str['mgrid'], "-c",
vdp_key_str['typeid'],
"-c", vdp_key_str['typeid_ver'], "-c",
vdp_key_str['vsiid'], "-c",
"hints=none", "-c",
vdp_key_str['filter']],
oui_args=oui_cmd_str)
else:
reply = self.run_vdptool(["-T", "-i", self.port_name,
"-V", mode, "-c", vdp_key_str['mode'],
"-c", vdp_key_str['mgrid'], "-c",
vdp_key_str['typeid'],
"-c", vdp_key_str['typeid_ver'], "-c",
vdp_key_str['vsiid'], "-c",
"hints=none", "-c",
vdp_key_str['filter']],
oui_args=oui_cmd_str)
return reply | Constructs and Sends the VDP Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param mode: Associate or De-associate
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param vsiid: VSI value
:param filter_frmt: Filter Format
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: Reply from vdptool | entailment |
def crosscheck_query_vsiid_mac(self, reply, vsiid, mac):
"""Cross Check the reply against the input vsiid,mac for get query. """
vsiid_reply = reply.partition("uuid")[2].split()[0][4:]
if vsiid != vsiid_reply:
fail_reason = vdp_const.vsi_mismatch_failure_reason % (
vsiid, vsiid_reply)
LOG.error("%s", fail_reason)
return False, fail_reason
mac_reply = reply.partition("filter")[2].split('-')[1]
if mac != mac_reply:
fail_reason = vdp_const.mac_mismatch_failure_reason % (
mac, mac_reply)
LOG.error("%s", fail_reason)
return False, fail_reason
return True, None | Cross Check the reply against the input vsiid,mac for get query. | entailment |
def get_vdp_failure_reason(self, reply):
"""Parse the failure reason from VDP. """
try:
fail_reason = reply.partition(
"filter")[0].replace('\t', '').split('\n')[-2]
if len(fail_reason) == 0:
fail_reason = vdp_const.retrieve_failure_reason % (reply)
except Exception:
fail_reason = vdp_const.retrieve_failure_reason % (reply)
return fail_reason | Parse the failure reason from VDP. | entailment |
def check_filter_validity(self, reply, filter_str):
"""Check for the validify of the filter. """
try:
f_ind = reply.index(filter_str)
l_ind = reply.rindex(filter_str)
except Exception:
fail_reason = vdp_const.filter_failure_reason % (reply)
LOG.error("%s", fail_reason)
return False, fail_reason
if f_ind != l_ind:
# Currently not supported if reply contains a filter keyword
fail_reason = vdp_const.multiple_filter_failure_reason % (reply)
LOG.error("%s", fail_reason)
return False, fail_reason
return True, None | Check for the validify of the filter. | entailment |
def get_vlan_from_associate_reply(self, reply, vsiid, mac):
"""Parse the associate reply from VDP daemon to get the VLAN value. """
try:
verify_flag, fail_reason = self.crosscheck_reply_vsiid_mac(
reply, vsiid, mac)
if not verify_flag:
return constants.INVALID_VLAN, fail_reason
mode_str = reply.partition("mode = ")[2].split()[0]
if mode_str != "assoc":
fail_reason = self.get_vdp_failure_reason(reply)
return constants.INVALID_VLAN, fail_reason
except Exception:
fail_reason = vdp_const.mode_failure_reason % (reply)
LOG.error("%s", fail_reason)
return constants.INVALID_VLAN, fail_reason
check_filter, fail_reason = self.check_filter_validity(
reply, "filter = ")
if not check_filter:
return constants.INVALID_VLAN, fail_reason
try:
vlan_val = reply.partition("filter = ")[2].split('-')[0]
vlan = int(vlan_val)
except ValueError:
fail_reason = vdp_const.format_failure_reason % (reply)
LOG.error("%s", fail_reason)
return constants.INVALID_VLAN, fail_reason
return vlan, None | Parse the associate reply from VDP daemon to get the VLAN value. | entailment |
def check_hints(self, reply):
"""Parse the hints to check for errors. """
try:
f_ind = reply.index("hints")
l_ind = reply.rindex("hints")
except Exception:
fail_reason = vdp_const.hints_failure_reason % (reply)
LOG.error("%s", fail_reason)
return False, fail_reason
if f_ind != l_ind:
# Currently not supported if reply contains a filter keyword
fail_reason = vdp_const.multiple_hints_failure_reason % (reply)
LOG.error("%s", fail_reason)
return False, fail_reason
try:
hints_compl = reply.partition("hints")[2]
hints_val = reply.partition("hints")[2][0:4]
len_hints = int(hints_val)
hints_val = hints_compl[4:4 + len_hints]
hints = int(hints_val)
if hints != 0:
fail_reason = vdp_const.nonzero_hints_failure % (hints)
return False, fail_reason
except ValueError:
fail_reason = vdp_const.format_failure_reason % (reply)
LOG.error("%s", fail_reason)
return False, fail_reason
return True, None | Parse the hints to check for errors. | entailment |
def get_vlan_from_query_reply(self, reply, vsiid, mac):
"""Parse the query reply from VDP daemon to get the VLAN value. """
hints_ret, fail_reason = self.check_hints(reply)
if not hints_ret:
LOG.error("Incorrect hints found %s", reply)
return constants.INVALID_VLAN, fail_reason
check_filter, fail_reason = self.check_filter_validity(reply, "filter")
if not check_filter:
return constants.INVALID_VLAN, fail_reason
try:
verify_flag, fail_reason = self.crosscheck_query_vsiid_mac(
reply, vsiid, mac)
if not verify_flag:
return constants.INVALID_VLAN, fail_reason
filter_val = reply.partition("filter")[2]
len_fil = len(filter_val)
vlan_val = filter_val[4:len_fil].split('-')[0]
vlan = int(vlan_val)
except ValueError:
fail_reason = vdp_const.format_failure_reason % (reply)
LOG.error("%s", fail_reason)
return constants.INVALID_VLAN, fail_reason
return vlan, None | Parse the query reply from VDP daemon to get the VLAN value. | entailment |
def send_vdp_assoc(self, vsiid=None, mgrid=None, typeid=None,
typeid_ver=None, vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0,
mac="", vlan=0, oui_id="", oui_data="", sw_resp=False):
"""Sends the VDP Associate Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param vsiid: VSI value, Only UUID supported for now
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return vlan: VLAN value returned by vdptool which in turn is given
: by Switch
"""
if sw_resp and filter_frmt == vdp_const.VDP_FILTER_GIDMACVID:
reply = self.send_vdp_query_msg("assoc", mgrid, typeid, typeid_ver,
vsiid_frmt, vsiid, filter_frmt,
gid, mac, vlan, oui_id, oui_data)
vlan_resp, fail_reason = self.get_vlan_from_query_reply(
reply, vsiid, mac)
if vlan_resp != constants.INVALID_VLAN:
return vlan_resp, fail_reason
reply = self.send_vdp_msg("assoc", mgrid, typeid, typeid_ver,
vsiid_frmt, vsiid, filter_frmt, gid, mac,
vlan, oui_id, oui_data, sw_resp)
if sw_resp:
vlan, fail_reason = self.get_vlan_from_associate_reply(
reply, vsiid, mac)
return vlan, fail_reason
return None, None | Sends the VDP Associate Message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param vsiid: VSI value, Only UUID supported for now
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return vlan: VLAN value returned by vdptool which in turn is given
: by Switch | entailment |
def send_vdp_vnic_up(self, port_uuid=None, vsiid=None,
                     mgrid=None, typeid=None, typeid_ver=None,
                     vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                     filter_frmt=vdp_const.VDP_FILTER_GIDMACVID,
                     gid=0, mac="", vlan=0, oui=None,
                     new_network=False, vsw_cb_fn=None, vsw_cb_data=None):
    """Interface function to apps, called for a vNIC UP.

    This currently sends a VDP associate message and caches the VSI
    state for the port.
    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
    Section for more detailed information.

    :param port_uuid: UUID of the vNIC
    :param vsiid: VSI value, Only UUID supported for now
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui: OUI dict; when it carries an 'oui_id' key the whole dict
                is forwarded as the OUI data
    :param new_network: when True a response (VLAN) is requested from the
                        daemon/switch
    :param vsw_cb_fn: callback invoked on asynchronous VSI updates
    :param vsw_cb_data: opaque data passed back to vsw_cb_fn
    :return: (reply, fail_reason) -- VLAN reply from vdptool and the
             failure reason, if any
    """
    if oui is None:
        oui = {}
    # Only forward OUI information when the caller supplied an OUI type.
    oui_id = None
    oui_data = None
    if 'oui_id' in oui:
        oui_id = oui['oui_id']
        oui_data = oui
    reply, fail_reason = self.send_vdp_assoc(
        vsiid=vsiid, mgrid=mgrid, typeid=typeid, typeid_ver=typeid_ver,
        vsiid_frmt=vsiid_frmt, filter_frmt=filter_frmt, gid=gid, mac=mac,
        vlan=vlan, oui_id=oui_id, oui_data=oui_data, sw_resp=new_network)
    # Cache the VSI parameters so the association can be refreshed or
    # torn down later.
    self.store_vdp_vsi(port_uuid, mgrid, typeid, typeid_ver,
                       vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan,
                       new_network, reply, oui_id, oui_data, vsw_cb_fn,
                       vsw_cb_data, fail_reason)
    return reply, fail_reason
This currently sends an VDP associate message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param uuid: uuid of the vNIC
:param vsiid: VSI value, Only UUID supported for now
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon
:return reply: VLAN reply from vdptool | entailment |
def send_vdp_vnic_down(self, port_uuid=None, vsiid=None, mgrid=None,
                       typeid=None, typeid_ver=None,
                       vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                       filter_frmt=vdp_const.VDP_FILTER_GIDMACVID,
                       gid=0, mac="", vlan=0, oui=""):
    """Interface function to apps, called for a vNIC DOWN.

    This currently sends a VDP dis-associate message and clears the
    cached VSI state for the port.
    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
    Section for more detailed information.

    :param port_uuid: UUID of the vNIC
    :param vsiid: VSI value, Only UUID supported for now
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC; the correct non-zero VLAN must be
                 specified
    :param oui: OUI data (currently unused by the deassociate path)
    """
    try:
        # Serialize against other VDP operations on this interface.
        with self.mutex_lock:
            self.send_vdp_deassoc(vsiid=vsiid, mgrid=mgrid, typeid=typeid,
                                  typeid_ver=typeid_ver,
                                  vsiid_frmt=vsiid_frmt,
                                  filter_frmt=filter_frmt, gid=gid,
                                  mac=mac, vlan=vlan)
            self.clear_vdp_vsi(port_uuid)
    except Exception as e:
        # Best-effort teardown: log and continue rather than propagate.
        LOG.error("VNIC Down exception %s", e)
This currently sends an VDP dis-associate message.
Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP
Section for more detailed information
:param uuid: uuid of the vNIC
:param vsiid: VSI value, Only UUID supported for now
:param mgrid: MGR ID
:param typeid: Type ID
:param typeid_ver: Version of the Type ID
:param vsiid_frmt: Format of the following VSI argument
:param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now
:param gid: Group ID the vNIC belongs to
:param mac: MAC Address of the vNIC
:param vlan: VLAN of the vNIC
:param oui_id: OUI Type
:param oui_data: OUI Data
:param sw_resp: Flag indicating if response is required from the daemon | entailment |
def run_vdptool(self, args, oui_args=None):
    """Run the vdptool utility with the given arguments.

    Returns the command output on success. On failure the error is
    logged and None is returned implicitly (callers treat that as a
    failed invocation).
    """
    oui_args = oui_args or []
    command = ['vdptool'] + args + oui_args
    try:
        return utils.execute(command, root_helper=self.root_helper)
    except Exception as e:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': command, 'exception': e})
def get_routers(self, context, router_ids=None, hd_ids=None):
    """Make a remote process call to retrieve the sync data for routers.

    :param context: session context
    :param router_ids: list of routers to fetch
    :param hd_ids: hosting device ids, only routers assigned to these
                   hosting devices will be returned.
    """
    rpc_client = self.client.prepare(version='1.1')
    return rpc_client.call(context, 'cfg_sync_routers', host=self.host,
                           router_ids=router_ids,
                           hosting_device_ids=hd_ids)
:param context: session context
:param router_ids: list of routers to fetch
:param hd_ids: hosting device ids, only routers assigned to these
hosting devices will be returned. | entailment |
def update_floatingip_statuses(self, context, router_id, fip_statuses):
    """Remotely update the operational status of floating IPs.

    @param context: contains user information
    @param router_id: id of router associated with the floatingips
    @param fip_statuses: dict with floatingip_id as key and status as value
    """
    rpc_client = self.client.prepare(version='1.1')
    return rpc_client.call(context, 'update_floatingip_statuses_cfg',
                           router_id=router_id, fip_statuses=fip_statuses)
several floating IPs.
@param context: contains user information
@param router_id: id of router associated with the floatingips
@param fip_statuses: dict with floatingip_id as key and status as value | entailment |
def send_update_port_statuses(self, context, port_ids, status):
    """Call the plugin to update the port status which updates the DB.

    :param context: contains user information
    :param port_ids: list of ids of the ports associated with the status
    :param status: value of the status for the given port list (port_ids)
    """
    cctxt = self.client.prepare(version='1.1')
    return cctxt.call(context, 'update_port_statuses_cfg',
                      port_ids=port_ids, status=status)
:param context: contains user information
:param port_ids: list of ids of the ports associated with the status
:param status: value of the status for the given port list (port_ids) | entailment |
def router_deleted(self, context, routers):
    """Handle a router-deletion RPC notification.

    The deleted routers are queued in the removed-routers cache for
    processing in the next iteration.
    """
    LOG.debug('Got router deleted notification for %s', routers)
    self._update_removed_routers_cache(routers)
def routers_updated(self, context, routers):
    """Handle router creation/modification RPC notifications."""
    LOG.debug('Got routers updated notification :%s', routers)
    if not routers:
        return
    # Backward compatibility: older callers pass full router dicts
    # instead of bare ids.
    if isinstance(routers[0], dict):
        routers = [r['id'] for r in routers]
    self._update_updated_routers_cache(routers)
def collect_state(self, configurations):
    """Collect state from this helper.

    A set of attributes which summarizes the state of the routers and
    configurations managed by this config agent.

    :param configurations: dict of configuration values
    :return: dict of updated configuration values
    """
    num_ex_gw_ports = 0
    num_interfaces = 0
    num_floating_ips = 0
    router_infos = self.router_info.values()
    num_routers = len(router_infos)
    num_hd_routers = collections.defaultdict(int)
    for ri in router_infos:
        if ri.router.get('gw_port'):
            num_ex_gw_ports += 1
        num_interfaces += len(ri.router.get(
            bc.constants.INTERFACE_KEY, []))
        num_floating_ips += len(ri.router.get(
            bc.constants.FLOATINGIP_KEY, []))
        hd = ri.router['hosting_device']
        if hd:
            num_hd_routers[hd['id']] += 1
    # Per hosting-device router counts, keyed by hosting-device id.
    routers_per_hd = {hd_id: {'routers': num}
                      for hd_id, num in num_hd_routers.items()}
    non_responding = self._dev_status.get_backlogged_hosting_devices()
    configurations['total routers'] = num_routers
    configurations['total ex_gw_ports'] = num_ex_gw_ports
    configurations['total interfaces'] = num_interfaces
    configurations['total floating_ips'] = num_floating_ips
    configurations['hosting_devices'] = routers_per_hd
    configurations['non_responding_hosting_devices'] = non_responding
    return configurations
A set of attributes which summarizes the state of the routers and
configurations managed by this config agent.
:param configurations: dict of configuration values
:return dict of updated configuration values | entailment |
def _fetch_router_info(self, router_ids=None, device_ids=None,
                       all_routers=False):
    """Fetch router dict from the routing plugin.

    :param router_ids: List of router_ids of routers to fetch
    :param device_ids: List of device_ids whose routers to fetch
    :param all_routers: If True fetch all the routers for this agent.
    :return: List of router dicts of format:
             [ {router_dict1}, {router_dict2},.....]
    """
    # Fix: initialize so the final return cannot raise NameError when
    # no selector (router_ids/device_ids/all_routers) was supplied.
    routers = []
    try:
        if all_routers:
            LOG.debug('Fetching all routers')
            router_ids = self.plugin_rpc.get_router_ids(self.context)
            routers = self._fetch_router_chunk_data(router_ids)
        elif router_ids:
            routers = self._fetch_router_chunk_data(router_ids)
        elif device_ids:
            # Device-scoped fetch returns directly and intentionally
            # skips the chunk-size adjustment below.
            return self.plugin_rpc.get_routers(self.context,
                                               hd_ids=device_ids)
    except oslo_messaging.MessagingTimeout:
        # Server too slow: shrink the chunk size (down to the minimum)
        # and let the caller retry.
        if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
            self.sync_routers_chunk_size = max(
                int(round(self.sync_routers_chunk_size / 2)),
                SYNC_ROUTERS_MIN_CHUNK_SIZE)
            LOG.warning('Server failed to return info for routers in '
                        'required time, decreasing chunk size to: %s',
                        self.sync_routers_chunk_size)
        else:
            LOG.warning('Server failed to return info for routers in '
                        'required time even with min chunk size: %s. '
                        'It might be under very high load or just '
                        'inoperable',
                        self.sync_routers_chunk_size)
        raise
    except oslo_messaging.MessagingException:
        LOG.exception("RPC Error in fetching routers from plugin")
        self.fullsync = True
        raise n_exc.AbortSyncRouters()
    LOG.debug("Periodic_sync_routers_task successfully completed")
    # Adjust (grow) chunk size after a successful sync, bounded by the
    # configured maximum batch size.
    if (self.sync_routers_chunk_size <
            cfg.CONF.cfg_agent.max_device_sync_batch_size):
        self.sync_routers_chunk_size = min(
            self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
            cfg.CONF.cfg_agent.max_device_sync_batch_size)
    return routers
:param router_ids: List of router_ids of routers to fetch
:param device_ids: List of device_ids whose routers to fetch
:param all_routers: If True fetch all the routers for this agent.
:return: List of router dicts of format:
[ {router_dict1}, {router_dict2},.....] | entailment |
def _fetch_router_chunk_data(self, router_ids=None):
    """Fetch router data from the routing plugin in chunks.

    :param router_ids: List of router_ids of routers to fetch
    :return: List of router dicts of format:
             [ {router_dict1}, {router_dict2},.....]
    """
    chunk_size = self.sync_routers_chunk_size
    if len(router_ids) <= chunk_size:
        # Small enough to fetch in a single RPC call.
        return self.plugin_rpc.get_routers(
            self.context, router_ids=router_ids)
    # Fetch routers by chunks to reduce the load on the server and to
    # start router processing earlier.
    fetched = []
    for start in range(0, len(router_ids), chunk_size):
        batch = self.plugin_rpc.get_routers(
            self.context, (router_ids[start:start + chunk_size]))
        LOG.debug('Processing :%r', batch)
        fetched.extend(batch)
    return fetched
:param router_ids: List of router_ids of routers to fetch
:return: List of router dicts of format:
[ {router_dict1}, {router_dict2},.....] | entailment |
def _handle_sync_devices(self, routers):
    """Handle routers during a device_sync.

    This method performs post-processing on routers fetched from the
    routing plugin during a device sync. Routers are first fetched
    from the plugin based on the list of device_ids. Since fetched
    routers take precedence over pending work, matching router-ids
    buffered in update_routers and removed_routers are discarded.
    The existing router cache is also cleared in order to properly
    trigger updates and deletes. Lastly, invalid configuration in
    the underlying hosting-device is deleted via _cleanup_invalid_cfg.

    Modifies updated_routers, removed_routers, and sync_devices
    attributes.

    :param routers: working list of routers as populated in
                    process_services
    """
    sync_devices_list = list(self.sync_devices)
    LOG.debug("Fetching routers on:%s", sync_devices_list)
    fetched_routers = self._fetch_router_info(device_ids=sync_devices_list)
    if fetched_routers:
        LOG.debug("[sync_devices] Fetched routers :%s",
                  pp.pformat(fetched_routers))
        # Clear router_config cache: fetched routers take precedence
        # over any pending updates/removals for the same router.
        for router_dict in fetched_routers:
            self._del_from_updated_routers_cache(router_dict['id'])
            self._del_from_removed_routers_cache(router_dict['id'])
            LOG.debug("[sync_devices] invoking "
                      "_router_removed(%s)",
                      router_dict['id'])
            self._router_removed(router_dict['id'],
                                 deconfigure=False)
        self._cleanup_invalid_cfg(fetched_routers)
        routers.extend(fetched_routers)
        self.sync_devices.clear()
        LOG.debug("[sync_devices] %s finished",
                  sync_devices_list)
    else:
        # If the initial attempt to sync a device failed, retry again
        # (by not clearing sync_devices). Normal updated_routers
        # processing is still allowed to happen.
        self.sync_devices_attempts += 1
        max_attempts = cfg.CONF.cfg_agent.max_device_sync_attempts
        if self.sync_devices_attempts >= max_attempts:
            # Fix: use lazy logging args instead of eager %-formatting,
            # consistent with the rest of this module.
            LOG.debug("Max number [%d / %d ] of sync_devices "
                      "attempted. No further retries will "
                      "be attempted.",
                      self.sync_devices_attempts, max_attempts)
            self.sync_devices.clear()
            self.sync_devices_attempts = 0
        else:
            LOG.debug("Fetched routers was blank for sync attempt "
                      "[%d / %d], will attempt resync of %s devices "
                      "again in the next iteration",
                      self.sync_devices_attempts, max_attempts,
                      pp.pformat(self.sync_devices))
This method performs post-processing on routers fetched from the
routing plugin during a device sync. Routers are first fetched
from the plugin based on the list of device_ids. Since fetched
routers take precedence over pending work, matching router-ids
buffered in update_routers and removed_routers are discarded.
The existing router cache is also cleared in order to properly
trigger updates and deletes. Lastly, invalid configuration in
the underlying hosting-device is deleted via _cleanup_invalid_cfg.
Modifies updated_routers, removed_routers, and sync_devices
attributes
:param routers: working list of routers as populated in
process_services | entailment |
def _get_router_ids_from_removed_devices_info(removed_devices_info):
"""Extract router_ids from the removed devices info dict.
:param removed_devices_info: Dict of removed devices and their
associated resources.
Format:
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]},
...
},
'deconfigure': True/False
}
:return removed_router_ids: List of removed router ids
"""
removed_router_ids = []
for hd_id, resources in removed_devices_info['hosting_data'].items():
removed_router_ids += resources.get('routers', [])
return removed_router_ids | Extract router_ids from the removed devices info dict.
:param removed_devices_info: Dict of removed devices and their
associated resources.
Format:
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]},
...
},
'deconfigure': True/False
}
:return removed_router_ids: List of removed router ids | entailment |
def _sort_resources_per_hosting_device(resources):
"""This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
}
"""
hosting_devices = {}
for key in resources.keys():
for r in resources.get(key) or []:
if r.get('hosting_device') is None:
continue
hd_id = r['hosting_device']['id']
hosting_devices.setdefault(hd_id, {})
hosting_devices[hd_id].setdefault(key, []).append(r)
return hosting_devices | This function will sort the resources on hosting device.
The sorting on hosting device is done by looking up the
`hosting_device` attribute of the resource, and its `id`.
:param resources: a dict with key of resource name
:return dict sorted on the hosting device of input resource. Format:
hosting_devices = {
'hd_id1' : {'routers':[routers],
'removed_routers':[routers], .... }
'hd_id2' : {'routers':[routers], .. }
.......
} | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.