sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def create_port_channel(self, nexus_host, vpc_nbr):
"""Creates port channel n on Nexus switch."""
starttime = time.time()
vpc_str = str(vpc_nbr)
path_snip = snipp.PATH_ALL
body_snip = snipp.BODY_ADD_PORT_CH % (vpc_str, vpc_str, vpc_str)
self.send_edit_string(nexus_host, path_snip, body_snip)
self._apply_user_port_channel_config(nexus_host, vpc_nbr)
self.capture_and_print_timeshot(
starttime, "create_port_channel",
switch=nexus_host) | Creates port channel n on Nexus switch. | entailment |
def delete_port_channel(self, nexus_host, vpc_nbr):
"""Deletes delete port channel on Nexus switch."""
starttime = time.time()
path_snip = snipp.PATH_ALL
body_snip = snipp.BODY_DEL_PORT_CH % (vpc_nbr)
self.send_edit_string(nexus_host, path_snip, body_snip)
self.capture_and_print_timeshot(
starttime, "delete_port_channel",
switch=nexus_host) | Deletes delete port channel on Nexus switch. | entailment |
def _get_port_channel_group(self, nexus_host, intf_type, interface):
"""Look for 'channel-group x' config and return x.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns pc_group: Returns port channel group if
present else 0
"""
ch_grp = 0
# channel-group only applied to ethernet,
# otherwise, return 0
if intf_type != 'ethernet':
return ch_grp
match_key = "eth" + interface
action = snipp.PATH_GET_PC_MEMBERS
starttime = time.time()
result = self.client.rest_get(action, nexus_host)
self.capture_and_print_timeshot(starttime, "getpc",
switch=nexus_host)
try:
for pcmbr in result['imdata']:
mbr_data = pcmbr['pcRsMbrIfs']['attributes']
if mbr_data['tSKey'] == match_key:
_, nbr = mbr_data['parentSKey'].split("po")
ch_grp = int(nbr)
break
except Exception:
# Valid when there is no channel-group configured.
ch_grp = 0
LOG.debug("GET interface %(key)s port channel is %(pc)d",
{'key': match_key, 'pc': ch_grp})
return ch_grp | Look for 'channel-group x' config and return x.
:param nexus_host: IP address of Nexus switch
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:returns pc_group: Returns port channel group if
present else 0 | entailment |
def initialize_baremetal_switch_interfaces(self, interfaces):
"""Initialize Nexus interfaces and for initial baremetal event.
This get/create port channel number, applies channel-group to
ethernet interface, and initializes trunking on interface.
:param interfaces: Receive a list of interfaces containing:
nexus_host: IP address of Nexus switch
intf_type: String which specifies interface type. example: ethernet
interface: String indicating which interface. example: 1/19
is_native: Whether native vlan must be configured.
ch_grp: May replace port channel to each entry. channel number is
0 if none
"""
if not interfaces:
return
max_ifs = len(interfaces)
starttime = time.time()
learned, nexus_ip_list = self._build_host_list_and_verify_chgrp(
interfaces)
if not nexus_ip_list:
return
if max_ifs > 1:
# update vpc db with learned vpcid or get new one.
if learned:
ch_grp = interfaces[0][-1]
self._configure_learned_port_channel(
nexus_ip_list, ch_grp)
else:
ch_grp = self._get_new_baremetal_portchannel_id(nexus_ip_list)
else:
ch_grp = 0
for i, (nexus_host, intf_type, nexus_port, is_native,
ch_grp_saved) in enumerate(interfaces):
if max_ifs > 1:
if learned:
ch_grp = ch_grp_saved
else:
self._config_new_baremetal_portchannel(
ch_grp, nexus_host, intf_type, nexus_port)
self._replace_interface_ch_grp(interfaces, i, ch_grp)
# init port-channel instead of the provided ethernet
intf_type = 'port-channel'
nexus_port = str(ch_grp)
else:
self._replace_interface_ch_grp(interfaces, i, ch_grp)
trunk_mode_present, vlan_present = (
self._get_interface_switch_trunk_present(
nexus_host, intf_type, nexus_port))
if not vlan_present:
self.send_enable_vlan_on_trunk_int(
nexus_host, "", intf_type, nexus_port, False,
not trunk_mode_present)
elif not trunk_mode_present:
LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host,
nexus_help.format_interface_name(
intf_type, nexus_port))
self.capture_and_print_timeshot(
starttime, "init_bmif",
switch=nexus_host) | Initialize Nexus interfaces and for initial baremetal event.
This get/create port channel number, applies channel-group to
ethernet interface, and initializes trunking on interface.
:param interfaces: Receive a list of interfaces containing:
nexus_host: IP address of Nexus switch
intf_type: String which specifies interface type. example: ethernet
interface: String indicating which interface. example: 1/19
is_native: Whether native vlan must be configured.
ch_grp: May replace port channel to each entry. channel number is
0 if none | entailment |
def initialize_all_switch_interfaces(self, interfaces,
switch_ip=None, replay=True):
"""Configure Nexus interface and get port channel number.
Called during switch replay or just init if no replay
is configured. For latter case, only configured interfaces
are affected by this method.
During switch replay, the change group from the
host mapping data base is used. There is no attempt
to relearn port-channel from the Nexus switch. What
we last knew it to be will persist.
:param interfaces: List of interfaces for a given switch.
ch_grp can be altered as last arg
to each interface. If no ch_grp,
this arg will be zero.
:param switch_ip: IP address of Nexus switch
:param replay: Whether in replay path
"""
if not interfaces:
return
starttime = time.time()
if replay:
try:
vpcs = nxos_db.get_active_switch_vpc_allocs(switch_ip)
except cexc.NexusVPCAllocNotFound:
vpcs = []
for vpc in vpcs:
# if this is an allocated vpc, then recreate it
if not vpc.learned:
self.create_port_channel(switch_ip, vpc.vpc_id)
for i, (nexus_host, intf_type, nexus_port, is_native,
ch_grp) in enumerate(interfaces):
if replay and ch_grp != 0:
try:
vpc = nxos_db.get_switch_vpc_alloc(switch_ip, ch_grp)
self.add_ch_grp_to_interface(
nexus_host, intf_type, nexus_port, ch_grp)
except cexc.NexusVPCAllocNotFound:
pass
# if channel-group exists, switch to port-channel
# instead of the provided ethernet interface
intf_type = 'port-channel'
nexus_port = str(ch_grp)
#substitute content of ch_grp
no_chgrp_len = len(interfaces[i]) - 1
interfaces[i] = interfaces[i][:no_chgrp_len] + (ch_grp,)
trunk_mode_present, vlan_present = (
self._get_interface_switch_trunk_present(
nexus_host, intf_type, nexus_port))
if not vlan_present:
self.send_enable_vlan_on_trunk_int(
nexus_host, "", intf_type, nexus_port, False,
not trunk_mode_present)
elif not trunk_mode_present:
LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host,
nexus_help.format_interface_name(
intf_type, nexus_port))
self.capture_and_print_timeshot(
starttime, "get_allif",
switch=nexus_host) | Configure Nexus interface and get port channel number.
Called during switch replay or just init if no replay
is configured. For latter case, only configured interfaces
are affected by this method.
During switch replay, the change group from the
host mapping data base is used. There is no attempt
to relearn port-channel from the Nexus switch. What
we last knew it to be will persist.
:param interfaces: List of interfaces for a given switch.
ch_grp can be altered as last arg
to each interface. If no ch_grp,
this arg will be zero.
:param switch_ip: IP address of Nexus switch
:param replay: Whether in replay path | entailment |
def get_nexus_type(self, nexus_host):
"""Given the nexus host, get the type of Nexus switch.
:param nexus_host: IP address of Nexus switch
:returns: Nexus type
"""
starttime = time.time()
response = self.client.rest_get(
snipp.PATH_GET_NEXUS_TYPE, nexus_host)
self.capture_and_print_timeshot(
starttime, "gettype",
switch=nexus_host)
if response:
try:
result = response['imdata'][0]["eqptCh"]['attributes']['descr']
except Exception:
# Nexus Type is not depended on at this time so it's ok
# if can't get the Nexus type. The real purpose
# of this method is to determine if the connection is active.
result = ''
nexus_type = re.findall(
"Nexus\s*(\d)\d+\s*[0-9A-Z]+\s*"
"[cC]hassis",
result)
if len(nexus_type) > 0:
LOG.debug("GET call returned Nexus type %d",
int(nexus_type[0]))
return int(nexus_type[0])
else:
result = ''
LOG.debug("GET call failed to return Nexus type. Received %s.",
result)
return -1 | Given the nexus host, get the type of Nexus switch.
:param nexus_host: IP address of Nexus switch
:returns: Nexus type | entailment |
def get_create_vlan(self, nexus_host, vlanid, vni, conf_str):
"""Returns an XML snippet for create VLAN on a Nexus Switch."""
starttime = time.time()
if vni:
body_snip = snipp.BODY_VXLAN_ALL_INCR % (vlanid, vni)
else:
body_snip = snipp.BODY_VLAN_ALL_INCR % vlanid
conf_str += body_snip + snipp.BODY_VLAN_ALL_CONT
self.capture_and_print_timeshot(
starttime, "get_create_vlan",
switch=nexus_host)
return conf_str | Returns an XML snippet for create VLAN on a Nexus Switch. | entailment |
def set_all_vlan_states(self, nexus_host, vlanid_range):
"""Set the VLAN states to active."""
starttime = time.time()
if not vlanid_range:
LOG.warning("Exiting set_all_vlan_states: "
"No vlans to configure")
return
# Eliminate possible whitespace and separate vlans by commas
vlan_id_list = re.sub(r'\s', '', vlanid_range).split(',')
if not vlan_id_list or not vlan_id_list[0]:
LOG.warning("Exiting set_all_vlan_states: "
"No vlans to configure")
return
path_str, body_vlan_all = self.start_create_vlan()
while vlan_id_list:
rangev = vlan_id_list.pop(0)
if '-' in rangev:
fr, to = rangev.split('-')
max = int(to) + 1
for vlan_id in range(int(fr), max):
body_vlan_all = self.get_create_vlan(
nexus_host, vlan_id, 0, body_vlan_all)
else:
body_vlan_all = self.get_create_vlan(
nexus_host, rangev, 0, body_vlan_all)
body_vlan_all = self.end_create_vlan(body_vlan_all)
self.send_edit_string(
nexus_host, path_str, body_vlan_all)
self.capture_and_print_timeshot(
starttime, "set_all_vlan_states",
switch=nexus_host) | Set the VLAN states to active. | entailment |
def create_vlan(self, nexus_host, vlanid, vni):
"""Given switch, vlanid, vni, Create a VLAN on Switch."""
starttime = time.time()
path_snip, body_snip = self.start_create_vlan()
body_snip = self.get_create_vlan(nexus_host, vlanid, vni, body_snip)
body_snip = self.end_create_vlan(body_snip)
self.send_edit_string(nexus_host, path_snip, body_snip)
self.capture_and_print_timeshot(
starttime, "create_vlan_seg",
switch=nexus_host) | Given switch, vlanid, vni, Create a VLAN on Switch. | entailment |
def delete_vlan(self, nexus_host, vlanid):
"""Delete a VLAN on Nexus Switch given the VLAN ID."""
starttime = time.time()
path_snip = snipp.PATH_VLAN % vlanid
self.client.rest_delete(path_snip, nexus_host)
self.capture_and_print_timeshot(
starttime, "del_vlan",
switch=nexus_host) | Delete a VLAN on Nexus Switch given the VLAN ID. | entailment |
def _get_vlan_body_on_trunk_int(self, nexus_host, vlanid, intf_type,
interface, is_native, is_delete,
add_mode):
"""Prepares an XML snippet for VLAN on a trunk interface.
:param nexus_host: IP address of Nexus switch
:param vlanid: Vlanid(s) to add to interface
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:param is_native: Is native vlan config desired?
:param is_delete: Is this a delete operation?
:param add_mode: Add mode trunk
:returns path_snippet, body_snippet
"""
starttime = time.time()
LOG.debug("NexusDriver get if body config for host %s: "
"if_type %s port %s",
nexus_host, intf_type, interface)
if intf_type == "ethernet":
body_if_type = "l1PhysIf"
path_interface = "phys-[eth" + interface + "]"
else:
body_if_type = "pcAggrIf"
path_interface = "aggr-[po" + interface + "]"
path_snip = (snipp.PATH_IF % (path_interface))
mode = snipp.BODY_PORT_CH_MODE if add_mode else ''
if is_delete:
increment_it = "-"
debug_desc = "delif"
native_vlan = ""
else:
native_vlan = 'vlan-' + str(vlanid)
debug_desc = "createif"
if vlanid is "":
increment_it = ""
else:
increment_it = "+"
if is_native:
body_snip = (snipp.BODY_NATIVE_TRUNKVLAN %
(body_if_type, mode, increment_it + str(vlanid),
str(native_vlan)))
else:
body_snip = (snipp.BODY_TRUNKVLAN %
(body_if_type, mode, increment_it + str(vlanid)))
self.capture_and_print_timeshot(
starttime, debug_desc,
switch=nexus_host)
return path_snip, body_snip | Prepares an XML snippet for VLAN on a trunk interface.
:param nexus_host: IP address of Nexus switch
:param vlanid: Vlanid(s) to add to interface
:param intf_type: String which specifies interface type.
example: ethernet
:param interface: String indicating which interface.
example: 1/19
:param is_native: Is native vlan config desired?
:param is_delete: Is this a delete operation?
:param add_mode: Add mode trunk
:returns path_snippet, body_snippet | entailment |
def disable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type,
interface, is_native):
"""Disable a VLAN on a trunk interface."""
starttime = time.time()
path_snip, body_snip = self._get_vlan_body_on_trunk_int(
nexus_host, vlanid, intf_type, interface,
is_native, True, False)
self.send_edit_string(nexus_host, path_snip, body_snip)
self.capture_and_print_timeshot(
starttime, "delif",
switch=nexus_host) | Disable a VLAN on a trunk interface. | entailment |
def send_edit_string(self, nexus_host, path_snip, body_snip,
check_to_close_session=True):
"""Sends rest Post request to Nexus switch."""
starttime = time.time()
LOG.debug("NexusDriver edit config for host %s: path: %s body: %s",
nexus_host, path_snip, body_snip)
self.client.rest_post(path_snip, nexus_host, body_snip)
self.capture_and_print_timeshot(
starttime, "send_edit",
switch=nexus_host) | Sends rest Post request to Nexus switch. | entailment |
def _send_cli_conf_string(self, nexus_host, cli_str):
"""Sends CLI Config commands to Nexus switch using NXAPI."""
starttime = time.time()
path_snip = snipp.PATH_USER_CMDS
body_snip = snipp.BODY_USER_CONF_CMDS % ('1', cli_str)
LOG.debug("NexusDriver CLI config for host %s: path: %s body: %s",
nexus_host, path_snip, body_snip)
self.nxapi_client.rest_post(path_snip, nexus_host, body_snip)
self.capture_and_print_timeshot(
starttime, "send_cliconf",
switch=nexus_host) | Sends CLI Config commands to Nexus switch using NXAPI. | entailment |
def send_enable_vlan_on_trunk_int(self, nexus_host, vlanid, intf_type,
interface, is_native, add_mode=False):
"""Gathers and sends an interface trunk XML snippet."""
path_snip, body_snip = self._get_vlan_body_on_trunk_int(
nexus_host, vlanid, intf_type, interface,
is_native, False, add_mode)
self.send_edit_string(nexus_host, path_snip, body_snip) | Gathers and sends an interface trunk XML snippet. | entailment |
def create_and_trunk_vlan(self, nexus_host, vlan_id, intf_type,
nexus_port, vni, is_native):
"""Create VLAN and trunk it on the specified ports."""
starttime = time.time()
self.create_vlan(nexus_host, vlan_id, vni)
LOG.debug("NexusDriver created VLAN: %s", vlan_id)
if nexus_port:
self.send_enable_vlan_on_trunk_int(
nexus_host, vlan_id,
intf_type, nexus_port,
is_native)
self.capture_and_print_timeshot(
starttime, "create_all",
switch=nexus_host) | Create VLAN and trunk it on the specified ports. | entailment |
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
"""Enable VXLAN on the switch."""
# Configure the "feature" commands and NVE interface
# (without "member" subcommand configuration).
# The Nexus 9K will not allow the "interface nve" configuration
# until the "feature nv overlay" command is issued and installed.
# To get around the N9K failing on the "interface nve" command
# send the two XML snippets down separately.
starttime = time.time()
# Do CLI 'feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "enabled"))
# Do CLI 'feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "enabled"))
# Do CLI 'int nve1' to Create nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_CREATE % nve_int_num))
# Do CLI 'no shut
# source-interface loopback %s'
# beneath int nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf)))
self.capture_and_print_timeshot(
starttime, "enable_vxlan",
switch=nexus_host) | Enable VXLAN on the switch. | entailment |
def disable_vxlan_feature(self, nexus_host):
"""Disable VXLAN on the switch."""
# Removing the "feature nv overlay" configuration also
# removes the "interface nve" configuration.
starttime = time.time()
# Do CLI 'no feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "disabled"))
# Do CLI 'no feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "disabled"))
self.capture_and_print_timeshot(
starttime, "disable_vxlan",
switch=nexus_host) | Disable VXLAN on the switch. | entailment |
def create_nve_member(self, nexus_host, nve_int_num, vni, mcast_group):
"""Add a member configuration to the NVE interface."""
# Do CLI [no] member vni %s mcast-group %s
# beneath int nve1
starttime = time.time()
path = snipp.PATH_VNI_UPDATE % (nve_int_num, vni)
body = snipp.BODY_VNI_UPDATE % (vni, vni, vni, mcast_group)
self.send_edit_string(nexus_host, path, body)
self.capture_and_print_timeshot(
starttime, "create_nve",
switch=nexus_host) | Add a member configuration to the NVE interface. | entailment |
def delete_nve_member(self, nexus_host, nve_int_num, vni):
"""Delete a member configuration on the NVE interface."""
starttime = time.time()
path_snip = snipp.PATH_VNI_UPDATE % (nve_int_num, vni)
self.client.rest_delete(path_snip, nexus_host)
self.capture_and_print_timeshot(
starttime, "delete_nve",
switch=nexus_host) | Delete a member configuration on the NVE interface. | entailment |
def _get_vrf_name(self, ri):
"""
overloaded method for generating a vrf_name that supports
region_id
"""
router_id = ri.router_name()[:self.DEV_NAME_LEN]
is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region
if is_multi_region_enabled:
region_id = cfg.CONF.multi_region.region_id
vrf_name = "%s-%s" % (router_id, region_id)
else:
vrf_name = router_id
return vrf_name | overloaded method for generating a vrf_name that supports
region_id | entailment |
def _get_interface_name_from_hosting_port(self, port):
"""
Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500
"""
try:
vlan = port['hosting_info']['segmentation_id']
int_prefix = port['hosting_info']['physical_interface']
return '%s.%s' % (int_prefix, vlan)
except KeyError as e:
params = {'key': e}
raise cfg_exc.DriverExpectedKeyNotSetException(**params) | Extract the underlying subinterface name for a port
e.g. Port-channel10.200 or GigabitEthernet0/0/0.500 | entailment |
def _get_item(list_containing_dicts_entries, attribute_value,
attribute_name='subnet_id'):
"""Searches a list of dicts and returns the first matching entry
The dict entry returned contains the attribute 'attribute_name' whose
value equals 'attribute_value'. If no such dict is found in the list
an empty dict is returned.
"""
for item in list_containing_dicts_entries:
if item.get(attribute_name) == attribute_value:
return item
return {} | Searches a list of dicts and returns the first matching entry
The dict entry returned contains the attribute 'attribute_name' whose
value equals 'attribute_value'. If no such dict is found in the list
an empty dict is returned. | entailment |
def _nat_rules_for_internet_access(self, acl_no, network, netmask,
inner_itfc, outer_itfc, vrf_name):
"""Configure the NAT rules for an internal network.
Configuring NAT rules in the ASR1k is a three step process. First
create an ACL for the IP range of the internal network. Then enable
dynamic source NATing on the external interface of the ASR1k for this
ACL and VRF of the neutron router. Finally enable NAT on the interfaces
of the ASR1k where the internal and external networks are connected.
:param acl_no: ACL number of the internal network.
:param network: internal network
:param netmask: netmask of the internal network.
:param inner_itfc: (name of) interface connected to the internal
network
:param outer_itfc: (name of) interface connected to the external
network
:param vrf_name: VRF corresponding to this virtual router
:return: True if configuration succeeded
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
IOSXEConfigException
"""
acl_present = self._check_acl(acl_no, network, netmask)
if not acl_present:
conf_str = snippets.CREATE_ACL % (acl_no, network, netmask)
self._edit_running_config(conf_str, 'CREATE_ACL')
pool_name = "%s_nat_pool" % vrf_name
conf_str = asr1k_snippets.SET_DYN_SRC_TRL_POOL % (acl_no, pool_name,
vrf_name)
try:
self._edit_running_config(conf_str, 'SET_DYN_SRC_TRL_POOL')
except Exception as dyn_nat_e:
LOG.info("Ignore exception for SET_DYN_SRC_TRL_POOL: %s. "
"The config seems to be applied properly but netconf "
"seems to report an error.", dyn_nat_e)
conf_str = snippets.SET_NAT % (inner_itfc, 'inside')
self._edit_running_config(conf_str, 'SET_NAT')
conf_str = snippets.SET_NAT % (outer_itfc, 'outside')
self._edit_running_config(conf_str, 'SET_NAT') | Configure the NAT rules for an internal network.
Configuring NAT rules in the ASR1k is a three step process. First
create an ACL for the IP range of the internal network. Then enable
dynamic source NATing on the external interface of the ASR1k for this
ACL and VRF of the neutron router. Finally enable NAT on the interfaces
of the ASR1k where the internal and external networks are connected.
:param acl_no: ACL number of the internal network.
:param network: internal network
:param netmask: netmask of the internal network.
:param inner_itfc: (name of) interface connected to the internal
network
:param outer_itfc: (name of) interface connected to the external
network
:param vrf_name: VRF corresponding to this virtual router
:return: True if configuration succeeded
:raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
IOSXEConfigException | entailment |
def _remove_internal_nw_nat_rules(self, ri, ports, ext_port,
intf_deleted=False):
"""
Removes the NAT rules already configured when an internal network is
removed.
:param ri -- router-info object
:param ports -- list of affected ports where network nat rules
was affected
:param ext_port -- external facing port
:param intf_deleted-- If True, indicates that the subinterface was
deleted.
"""
acls = []
# first disable nat in all inner ports
for port in ports:
in_itfc_name = self._get_interface_name_from_hosting_port(port)
acls.append(self._generate_acl_num_from_port(port))
is_alone = len(port['change_details']['current_ports']) == 1
if not intf_deleted and is_alone is True:
self._remove_interface_nat(in_itfc_name, 'inside')
# There is a possibility that the dynamic NAT rule cannot be removed
# from the running config, if there is still traffic in the inner
# interface causing a rule to be present in the NAT translation
# table. For this we give 2 seconds for the 'inside NAT rule' to
# expire and then clear the NAT translation table manually. This can
# be costly and hence is not enabled here, pending further
# sinvestigation.
# LOG.debug("Sleep for 2 seconds before clearing NAT rules")
# time.sleep(2)
# clear the NAT translation table
# self._remove_dyn_nat_translations()
# remove dynamic nat rules and acls
vrf_name = self._get_vrf_name(ri)
ext_itfc_name = self._get_interface_name_from_hosting_port(ext_port)
for acl in acls:
self._remove_dyn_nat_rule(acl, ext_itfc_name, vrf_name) | Removes the NAT rules already configured when an internal network is
removed.
:param ri -- router-info object
:param ports -- list of affected ports where network nat rules
was affected
:param ext_port -- external facing port
:param intf_deleted-- If True, indicates that the subinterface was
deleted. | entailment |
def _do_add_floating_ip_asr1k(self, floating_ip, fixed_ip, vrf,
ex_gw_port):
"""
To implement a floating ip, an ip static nat is configured in the
underlying router ex_gw_port contains data to derive the vlan
associated with related subnet for the fixed ip. The vlan in turn
is applied to the redundancy parameter for setting the IP NAT.
"""
vlan = ex_gw_port['hosting_info']['segmentation_id']
hsrp_grp = ex_gw_port[ha.HA_INFO]['group']
LOG.debug("add floating_ip: %(fip)s, fixed_ip: %(fixed_ip)s, "
"vrf: %(vrf)s, ex_gw_port: %(port)s",
{'fip': floating_ip, 'fixed_ip': fixed_ip, 'vrf': vrf,
'port': ex_gw_port})
confstr = (asr1k_snippets.SET_STATIC_SRC_TRL_NO_VRF_MATCH %
(fixed_ip, floating_ip, vrf, hsrp_grp, vlan))
self._edit_running_config(confstr, 'SET_STATIC_SRC_TRL_NO_VRF_MATCH') | To implement a floating ip, an ip static nat is configured in the
underlying router ex_gw_port contains data to derive the vlan
associated with related subnet for the fixed ip. The vlan in turn
is applied to the redundancy parameter for setting the IP NAT. | entailment |
def report_non_responding_hosting_devices(self, context, host,
hosting_device_ids):
"""Report that a hosting device is determined to be dead.
:param context: contains user information
:param host: originator of callback
:param hosting_device_ids: list of non-responding hosting devices
"""
# let the generic status update callback function handle this callback
self.update_hosting_device_status(context, host,
{const.HD_DEAD: hosting_device_ids}) | Report that a hosting device is determined to be dead.
:param context: contains user information
:param host: originator of callback
:param hosting_device_ids: list of non-responding hosting devices | entailment |
def update_hosting_device_status(self, context, host, status_info):
"""Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
}
"""
for status, hd_ids in six.iteritems(status_info):
# update hosting device entry in db to new status
hd_spec = {'hosting_device': {'status': status}}
for hd_id in hd_ids:
self._dmplugin.update_hosting_device(context, hd_id, hd_spec)
if status == const.HD_DEAD or status == const.HD_ERROR:
self._dmplugin.handle_non_responding_hosting_devices(
context, host, hd_ids) | Report status changes for hosting devices.
:param context: contains user information
:param host: originator of callback
:param status_info: Dictionary with list of hosting device ids for
each type of hosting device status to be updated i.e.::
{
HD_ACTIVE: list_of_ids_of_active_hds,
HD_NOT_RESPONDING: list_of_ids_of_not_responding_hds,
HD_DEAD: list_of_ids_of_dead_hds,
...
} | entailment |
def get_hosting_devices_for_agent(self, context, host):
"""Fetches routers that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: dict of hosting devices managed by the cfg agent
"""
agent_ids = self._dmplugin.get_cfg_agents(context, active=None,
filters={'host': [host]})
if agent_ids:
return [self._dmplugin.get_device_info_for_agent(context, hd_db)
for hd_db in self._dmplugin.get_hosting_devices_db(
context, filters={'cfg_agent_id': [agent_ids[0].id]})]
return [] | Fetches routers that a Cisco cfg agent is managing.
This function is supposed to be called when the agent has started,
is ready to take on assignments and before any callbacks to fetch
logical resources are issued.
:param context: contains user information
:param host: originator of callback
:returns: dict of hosting devices managed by the cfg agent | entailment |
def add_router_to_hosting_device(self, context, hosting_device_id,
router_id):
"""Add a (non-hosted) router to a hosting device."""
e_context = context.elevated()
r_hd_binding_db = self._get_router_binding_info(e_context, router_id)
if r_hd_binding_db.hosting_device_id:
if r_hd_binding_db.hosting_device_id == hosting_device_id:
return
raise routertypeawarescheduler.RouterHostedByHostingDevice(
router_id=router_id, hosting_device_id=hosting_device_id)
rt_info = self.validate_hosting_device_router_combination(
context, r_hd_binding_db, hosting_device_id)
result = self.schedule_router_on_hosting_device(
e_context, r_hd_binding_db, hosting_device_id,
rt_info['slot_need'])
if result:
# refresh so that we get latest contents from DB
e_context.session.expire(r_hd_binding_db)
router = self.get_router(e_context, router_id)
self.add_type_and_hosting_device_info(
e_context, router, r_hd_binding_db, schedule=False)
l3_cfg_notifier = self.agent_notifiers.get(AGENT_TYPE_L3_CFG)
if l3_cfg_notifier:
l3_cfg_notifier.router_added_to_hosting_device(context, router)
else:
raise routertypeawarescheduler.RouterSchedulingFailed(
router_id=router_id, hosting_device_id=hosting_device_id) | Add a (non-hosted) router to a hosting device. | entailment |
def remove_router_from_hosting_device(self, context, hosting_device_id,
router_id):
"""Remove the router from hosting device.
After removal, the router will be non-hosted until there is update
which leads to re-schedule or be added to another hosting device
manually.
"""
e_context = context.elevated()
r_hd_binding_db = self._get_router_binding_info(e_context, router_id)
if r_hd_binding_db.hosting_device_id != hosting_device_id:
raise routertypeawarescheduler.RouterNotHostedByHostingDevice(
router_id=router_id, hosting_device_id=hosting_device_id)
router = self.get_router(context, router_id)
self.add_type_and_hosting_device_info(
e_context, router, r_hd_binding_db, schedule=False)
# conditionally remove router from backlog ensure it does not get
# scheduled automatically
self.remove_router_from_backlog(id)
l3_cfg_notifier = self.agent_notifiers.get(AGENT_TYPE_L3_CFG)
if l3_cfg_notifier:
l3_cfg_notifier.router_removed_from_hosting_device(context, router)
LOG.debug("Unscheduling router %s", r_hd_binding_db.router_id)
self.unschedule_router_from_hosting_device(context, r_hd_binding_db)
# now unbind the router from the hosting device
with e_context.session.begin(subtransactions=True):
r_hd_binding_db.hosting_device_id = None
e_context.session.add(r_hd_binding_db) | Remove the router from hosting device.
After removal, the router will be non-hosted until there is update
which leads to re-schedule or be added to another hosting device
manually. | entailment |
def get_number_of_agents_for_scheduling(self, context):
"""Return number of agents on which the router will be scheduled."""
num_agents = len(self.get_l3_agents(context, active=True,
filters={'agent_modes': [bc.constants.L3_AGENT_MODE_LEGACY,
bc.constants.L3_AGENT_MODE_DVR_SNAT]}))
max_agents = cfg.CONF.max_l3_agents_per_router
if max_agents:
if max_agents > num_agents:
LOG.info("Number of active agents lower than "
"max_l3_agents_per_router. L3 agents "
"available: %s", num_agents)
else:
num_agents = max_agents
return num_agents | Return number of agents on which the router will be scheduled. | entailment |
def _notify_subnet_create(resource, event, trigger, **kwargs):
"""Called when a new subnet is created in the external network"""
context = kwargs['context']
subnet = kwargs['subnet']
l3plugin = bc.get_plugin(L3_ROUTER_NAT)
for router in l3plugin.get_routers(context):
if (router['external_gateway_info'] and
(router['external_gateway_info']['network_id'] ==
subnet['network_id'])):
router_data = {'router': router}
l3plugin.update_router(context, router['id'], router_data) | Called when a new subnet is created in the external network | entailment |
def _notify_cfg_agent_port_update(resource, event, trigger, **kwargs):
"""Called when router port/interface is enabled/disabled"""
original_port = kwargs.get('original_port')
updated_port = kwargs.get('port')
if (updated_port is not None and original_port is not None and (
updated_port.get('admin_state_up')) != (
original_port.get('admin_state_up'))):
new_port_data = {'port': {}}
new_port_data['port']['admin_state_up'] = (
updated_port.get('admin_state_up'))
original_device_owner = original_port.get('device_owner', '')
if original_device_owner.startswith('network'):
router_id = original_port.get('device_id')
context = kwargs.get('context')
l3plugin = bc.get_plugin(L3_ROUTER_NAT)
if l3plugin and router_id:
l3plugin._notify_port_update_routers(context, router_id,
original_port,
new_port_data,
'update_port_status_cfg') | Called when router port/interface is enabled/disabled | entailment |
def is_port_profile_created(self, vlan_id, device_id):
"""Indicates if port profile has been created on UCS Manager."""
entry = self.session.query(ucsm_model.PortProfile).filter_by(
vlan_id=vlan_id, device_id=device_id).first()
return entry and entry.created_on_ucs | Indicates if port profile has been created on UCS Manager. | entailment |
def get_port_profile_for_vlan(self, vlan_id, device_id):
"""Returns Vlan id associated with the port profile."""
entry = self.session.query(ucsm_model.PortProfile).filter_by(
vlan_id=vlan_id, device_id=device_id).first()
return entry.profile_id if entry else None | Returns Vlan id associated with the port profile. | entailment |
def add_port_profile(self, profile_name, vlan_id, device_id):
"""Adds a port profile and its vlan_id to the table."""
if not self.get_port_profile_for_vlan(vlan_id, device_id):
port_profile = ucsm_model.PortProfile(profile_id=profile_name,
vlan_id=vlan_id,
device_id=device_id,
created_on_ucs=False)
with self.session.begin(subtransactions=True):
self.session.add(port_profile)
return port_profile | Adds a port profile and its vlan_id to the table. | entailment |
def set_port_profile_created(self, vlan_id, profile_name, device_id):
"""Sets created_on_ucs flag to True."""
with self.session.begin(subtransactions=True):
port_profile = self.session.query(
ucsm_model.PortProfile).filter_by(
vlan_id=vlan_id, profile_id=profile_name,
device_id=device_id).first()
if port_profile:
port_profile.created_on_ucs = True
self.session.merge(port_profile)
else:
new_profile = ucsm_model.PortProfile(profile_id=profile_name,
vlan_id=vlan_id,
device_id=device_id,
created_on_ucs=True)
self.session.add(new_profile) | Sets created_on_ucs flag to True. | entailment |
def delete_vlan_entry(self, vlan_id):
"""Deletes entry for a vlan_id if it exists."""
with self.session.begin(subtransactions=True):
try:
self.session.query(ucsm_model.PortProfile).filter_by(
vlan_id=vlan_id).delete()
except orm.exc.NoResultFound:
return | Deletes entry for a vlan_id if it exists. | entailment |
def add_service_profile_template(self, vlan_id, sp_template, ucsm_ip):
"""Adds an entry for a vlan_id on a SP template to the table."""
if not self.get_sp_template_vlan_entry(vlan_id, sp_template, ucsm_ip):
entry = ucsm_model.ServiceProfileTemplate(vlan_id=vlan_id,
sp_template=sp_template,
device_id=ucsm_ip,
updated_on_ucs=False)
self.session.add(entry) | Adds an entry for a vlan_id on a SP template to the table. | entailment |
def set_sp_template_updated(self, vlan_id, sp_template, device_id):
"""Sets update_on_ucs flag to True."""
entry = self.get_sp_template_vlan_entry(vlan_id,
sp_template,
device_id)
if entry:
entry.updated_on_ucs = True
self.session.merge(entry)
return entry
else:
return False | Sets update_on_ucs flag to True. | entailment |
def delete_sp_template_for_vlan(self, vlan_id):
"""Deletes SP Template for a vlan_id if it exists."""
with self.session.begin(subtransactions=True):
try:
self.session.query(
ucsm_model.ServiceProfileTemplate).filter_by(
vlan_id=vlan_id).delete()
except orm.exc.NoResultFound:
return | Deletes SP Template for a vlan_id if it exists. | entailment |
def add_vnic_template(self, vlan_id, ucsm_ip, vnic_template, physnet):
"""Adds an entry for a vlan_id on a SP template to the table."""
if not self.get_vnic_template_vlan_entry(vlan_id, vnic_template,
ucsm_ip, physnet):
vnic_t = ucsm_model.VnicTemplate(vlan_id=vlan_id,
vnic_template=vnic_template,
device_id=ucsm_ip,
physnet=physnet,
updated_on_ucs=False)
with self.session.begin(subtransactions=True):
self.session.add(vnic_t)
return vnic_t | Adds an entry for a vlan_id on a SP template to the table. | entailment |
def set_vnic_template_updated(self, vlan_id, ucsm_ip, vnic_template,
physnet):
"""Sets update_on_ucs flag to True for a Vnic Template entry."""
with self.session.begin(subtransactions=True):
entry = self.get_vnic_template_vlan_entry(vlan_id, vnic_template,
ucsm_ip, physnet)
if entry:
entry.updated_on_ucs = True
self.session.merge(entry)
return entry | Sets update_on_ucs flag to True for a Vnic Template entry. | entailment |
def delete_vnic_template_for_vlan(self, vlan_id):
"""Deletes VNIC Template for a vlan_id and physnet if it exists."""
with self.session.begin(subtransactions=True):
try:
self.session.query(ucsm_model.VnicTemplate).filter_by(
vlan_id=vlan_id).delete()
except orm.exc.NoResultFound:
return | Deletes VNIC Template for a vlan_id and physnet if it exists. | entailment |
def has_port_profile_to_delete(self, profile_name, device_id):
"""Returns True if port profile delete table containes PP."""
count = self.session.query(ucsm_model.PortProfileDelete).filter_by(
profile_id=profile_name, device_id=device_id).count()
return count != 0 | Returns True if port profile delete table containes PP. | entailment |
def add_port_profile_to_delete_table(self, profile_name, device_id):
"""Adds a port profile to the delete table."""
if not self.has_port_profile_to_delete(profile_name, device_id):
port_profile = ucsm_model.PortProfileDelete(
profile_id=profile_name, device_id=device_id)
with self.session.begin(subtransactions=True):
self.session.add(port_profile)
return port_profile | Adds a port profile to the delete table. | entailment |
def remove_port_profile_to_delete(self, profile_name, device_id):
"""Removes port profile to be deleted from table."""
with self.session.begin(subtransactions=True):
self.session.query(ucsm_model.PortProfileDelete).filter_by(
profile_id=profile_name, device_id=device_id).delete() | Removes port profile to be deleted from table. | entailment |
def parse(cls, buf):
"""Parse DHCP Packet.
1. To get client IP Address(ciaddr).
2. To get relaying gateway IP Address(giaddr).
3. To get DHCP Relay Agent Information Option Suboption
such as Link Selection, VSS, Server Identifier override.
"""
pkt = DhcpPacket()
(pkt.ciaddr,) = cls.struct('4s').unpack_from(buf, 12)
(pkt.giaddr,) = cls.struct('4s').unpack_from(buf, 24)
cls.struct('4s').pack_into(buf, 24, b'')
pos = 240
while pos < len(buf):
(opttag,) = cls.struct('B').unpack_from(buf, pos)
if opttag == 0:
pos += 1
continue
if opttag == END:
pkt.end = pos
break
(optlen,) = cls.struct('B').unpack_from(buf, pos + 1)
startpos = pos
pos += 2
if opttag != RELAY_AGENT_INFO:
pos += optlen
continue
optend = pos + optlen
while pos < optend:
(subopttag, suboptlen) = cls.struct('BB').unpack_from(buf, pos)
fmt = '%is' % (suboptlen,)
(val,) = cls.struct(fmt).unpack_from(buf, pos + 2)
pkt.relay_options[subopttag] = val
pos += suboptlen + 2
cls.struct('%is' % (optlen + 2)).pack_into(buf, startpos, b'')
pkt.buf = buf
return pkt | Parse DHCP Packet.
1. To get client IP Address(ciaddr).
2. To get relaying gateway IP Address(giaddr).
3. To get DHCP Relay Agent Information Option Suboption
such as Link Selection, VSS, Server Identifier override. | entailment |
def report_dead_hosting_devices(self, context, hd_ids=None):
"""Report that a hosting device cannot be contacted (presumed dead).
:param: context: session context
:param: hosting_device_ids: list of non-responding hosting devices
:return: None
"""
cctxt = self.client.prepare()
cctxt.cast(context, 'report_non_responding_hosting_devices',
host=self.host, hosting_device_ids=hd_ids) | Report that a hosting device cannot be contacted (presumed dead).
:param: context: session context
:param: hosting_device_ids: list of non-responding hosting devices
:return: None | entailment |
def register_for_duty(self, context):
"""Report that a config agent is ready for duty."""
cctxt = self.client.prepare()
return cctxt.call(context, 'register_for_duty', host=self.host) | Report that a config agent is ready for duty. | entailment |
def get_hosting_devices_for_agent(self, context):
"""Get a list of hosting devices assigned to this agent."""
cctxt = self.client.prepare()
return cctxt.call(context,
'get_hosting_devices_for_agent',
host=self.host) | Get a list of hosting devices assigned to this agent. | entailment |
def process_services(self, device_ids=None, removed_devices_info=None):
"""Process services managed by this config agent.
This method is invoked by any of three scenarios.
1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`
seconds. This is the most common scenario.
In this mode, the method is called without any arguments.
2. Called by the `_process_backlogged_hosting_devices()` as part of
the backlog processing task. In this mode, a list of device_ids
are passed as arguments. These are the list of backlogged
hosting devices that are now reachable and we want to sync services
on them.
3. Called by the `hosting_devices_removed()` method. This is when
the config agent has received a notification from the plugin that
some hosting devices are going to be removed. The payload contains
the details of the hosting devices and the associated neutron
resources on them which should be processed and removed.
To avoid race conditions with these scenarios, this function is
protected by a lock.
This method goes on to invoke `process_service()` on the
different service helpers.
:param device_ids: List of devices that are now available and needs to
be processed
:param removed_devices_info: Info about the hosting devices which are
going to be removed and details of the resources hosted on them.
Expected Format::
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]}, ...},
'deconfigure': True/False
}
:returns: None
"""
LOG.debug("Processing services started")
# Now we process only routing service, additional services will be
# added in future
if self.routing_service_helper:
self.routing_service_helper.process_service(device_ids,
removed_devices_info)
else:
LOG.warning("No routing service helper loaded")
LOG.debug("Processing services completed") | Process services managed by this config agent.
This method is invoked by any of three scenarios.
1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`
seconds. This is the most common scenario.
In this mode, the method is called without any arguments.
2. Called by the `_process_backlogged_hosting_devices()` as part of
the backlog processing task. In this mode, a list of device_ids
are passed as arguments. These are the list of backlogged
hosting devices that are now reachable and we want to sync services
on them.
3. Called by the `hosting_devices_removed()` method. This is when
the config agent has received a notification from the plugin that
some hosting devices are going to be removed. The payload contains
the details of the hosting devices and the associated neutron
resources on them which should be processed and removed.
To avoid race conditions with these scenarios, this function is
protected by a lock.
This method goes on to invoke `process_service()` on the
different service helpers.
:param device_ids: List of devices that are now available and needs to
be processed
:param removed_devices_info: Info about the hosting devices which are
going to be removed and details of the resources hosted on them.
Expected Format::
{
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
'hd_id2': {'routers': [id3, id4, ...]}, ...},
'deconfigure': True/False
}
:returns: None | entailment |
def _process_backlogged_hosting_devices(self, context):
"""Process currently backlogged devices.
Go through the currently backlogged devices and process them.
For devices which are now reachable (compared to last time), we call
`process_services()` passing the now reachable device's id.
For devices which have passed the `hosting_device_dead_timeout` and
hence presumed dead, execute a RPC to the plugin informing that.
heartbeat revision
res['reachable'] - hosting device went from Unknown to Active state
process_services(...)
res['revived'] - hosting device went from Dead to Active
inform device manager that the hosting
device is now responsive
res['dead'] - hosting device went from Unknown to Dead
inform device manager that the hosting
device is non-responding
As additional note for the revived case:
Although the plugin was notified, there may be some lag
before the plugin actually can reschedule it's backlogged routers.
If process_services(device_ids...) isn't successful initially,
subsequent device syncs will be attempted until
MAX_DEVICE_SYNC_ATTEMPTS occurs. Main process_service task
will resume if sync_devices is populated.
:param context: RPC context
:return: None
"""
driver_mgr = self.get_routing_service_helper().driver_manager
res = self._dev_status.check_backlogged_hosting_devices(driver_mgr)
if res['reachable']:
self.process_services(device_ids=res['reachable'])
if res['revived']:
LOG.debug("Reporting revived hosting devices: %s " %
res['revived'])
# trigger a sync only on the revived hosting-devices
if self.conf.cfg_agent.enable_heartbeat is True:
self.devmgr_rpc.report_revived_hosting_devices(
context, hd_ids=res['revived'])
self.process_services(device_ids=res['revived'])
if res['dead']:
LOG.debug("Reporting dead hosting devices: %s", res['dead'])
self.devmgr_rpc.report_dead_hosting_devices(context,
hd_ids=res['dead']) | Process currently backlogged devices.
Go through the currently backlogged devices and process them.
For devices which are now reachable (compared to last time), we call
`process_services()` passing the now reachable device's id.
For devices which have passed the `hosting_device_dead_timeout` and
hence presumed dead, execute a RPC to the plugin informing that.
heartbeat revision
res['reachable'] - hosting device went from Unknown to Active state
process_services(...)
res['revived'] - hosting device went from Dead to Active
inform device manager that the hosting
device is now responsive
res['dead'] - hosting device went from Unknown to Dead
inform device manager that the hosting
device is non-responding
As additional note for the revived case:
Although the plugin was notified, there may be some lag
before the plugin actually can reschedule it's backlogged routers.
If process_services(device_ids...) isn't successful initially,
subsequent device syncs will be attempted until
MAX_DEVICE_SYNC_ATTEMPTS occurs. Main process_service task
will resume if sync_devices is populated.
:param context: RPC context
:return: None | entailment |
def agent_updated(self, context, payload):
"""Deal with agent updated RPC message."""
try:
if payload['admin_state_up']:
#TODO(hareeshp): implement agent updated handling
pass
except KeyError as e:
LOG.error("Invalid payload format for received RPC message "
"`agent_updated`. Error is %(error)s. Payload is "
"%(payload)s", {'error': e, 'payload': payload}) | Deal with agent updated RPC message. | entailment |
def hosting_devices_assigned_to_cfg_agent(self, context, payload):
"""Deal with hosting devices assigned to this config agent."""
LOG.debug("Got hosting device assigned, payload: %s" % payload)
try:
if payload['hosting_device_ids']:
#TODO(hareeshp): implement assignment of hosting devices
self.routing_service_helper.fullsync = True
except KeyError as e:
LOG.error("Invalid payload format for received RPC message "
"`hosting_devices_assigned_to_cfg_agent`. Error is "
"%(error)s. Payload is %(payload)s",
{'error': e, 'payload': payload}) | Deal with hosting devices assigned to this config agent. | entailment |
def hosting_devices_unassigned_from_cfg_agent(self, context, payload):
"""Deal with hosting devices unassigned from this config agent."""
try:
if payload['hosting_device_ids']:
#TODO(hareeshp): implement unassignment of hosting devices
pass
except KeyError as e:
LOG.error("Invalid payload format for received RPC message "
"`hosting_devices_unassigned_from_cfg_agent`. Error "
"is %(error)s. Payload is %(payload)s",
{'error': e, 'payload': payload}) | Deal with hosting devices unassigned from this config agent. | entailment |
def hosting_devices_removed(self, context, payload):
"""Deal with hosting device removed RPC message."""
try:
if payload['hosting_data']:
if payload['hosting_data'].keys():
self.process_services(removed_devices_info=payload)
except KeyError as e:
LOG.error("Invalid payload format for received RPC message "
"`hosting_devices_removed`. Error is %(error)s. Payload "
"is %(payload)s", {'error': e, 'payload': payload}) | Deal with hosting device removed RPC message. | entailment |
def _agent_registration(self):
"""Register this agent with the server.
This method registers the cfg agent with the neutron server so hosting
devices can be assigned to it. In case the server is not ready to
accept registration (it sends a False) then we retry registration
for `MAX_REGISTRATION_ATTEMPTS` with a delay of
`REGISTRATION_RETRY_DELAY`. If there is no server response or a
failure to register after the required number of attempts,
the agent stops itself.
"""
for attempts in range(MAX_REGISTRATION_ATTEMPTS):
context = bc.context.get_admin_context_without_session()
self.send_agent_report(self.agent_state, context)
try:
res = self.devmgr_rpc.register_for_duty(context)
except Exception:
res = False
LOG.warning("[Agent registration] Rpc exception. Neutron "
"may not be available or busy. Retrying "
"in %0.2f seconds ", REGISTRATION_RETRY_DELAY)
if res is True:
LOG.info("[Agent registration] Agent successfully registered")
return
elif res is False:
LOG.warning("[Agent registration] Neutron server said "
"that device manager was not ready. Retrying "
"in %0.2f seconds ", REGISTRATION_RETRY_DELAY)
time.sleep(REGISTRATION_RETRY_DELAY)
elif res is None:
LOG.error("[Agent registration] Neutron server said that "
"no device manager was found. Cannot continue. "
"Exiting!")
raise SystemExit(_("Cfg Agent exiting"))
LOG.error("[Agent registration] %d unsuccessful registration "
"attempts. Exiting!", MAX_REGISTRATION_ATTEMPTS)
raise SystemExit(_("Cfg Agent exiting")) | Register this agent with the server.
This method registers the cfg agent with the neutron server so hosting
devices can be assigned to it. In case the server is not ready to
accept registration (it sends a False) then we retry registration
for `MAX_REGISTRATION_ATTEMPTS` with a delay of
`REGISTRATION_RETRY_DELAY`. If there is no server response or a
failure to register after the required number of attempts,
the agent stops itself. | entailment |
def _report_state(self):
"""Report state to the plugin.
This task run every `keepalive_interval` period.
Collects, creates and sends a summary of the services currently
managed by this agent. Data is collected from the service helper(s).
Refer the `configurations` dict for the parameters reported.
:return: None
"""
LOG.debug("Report state task started")
self.keepalive_iteration += 1
if self.keepalive_iteration == self.report_iteration:
self._prepare_full_report_data()
self.keepalive_iteration = 0
LOG.debug("State report: %s", pprint.pformat(self.agent_state))
else:
self.agent_state.pop('configurations', None)
self.agent_state['local_time'] = datetime.now().strftime(
ISO8601_TIME_FORMAT)
LOG.debug("State report: %s", self.agent_state)
self.send_agent_report(self.agent_state, self.context) | Report state to the plugin.
This task run every `keepalive_interval` period.
Collects, creates and sends a summary of the services currently
managed by this agent. Data is collected from the service helper(s).
Refer the `configurations` dict for the parameters reported.
:return: None | entailment |
def send_agent_report(self, report, context):
"""Send the agent report via RPC."""
try:
self.state_rpc.report_state(context, report, self.use_call)
report.pop('start_flag', None)
self.use_call = False
LOG.debug("Send agent report successfully completed")
except AttributeError:
# This means the server does not support report_state
LOG.warning("Neutron server does not support state report. "
"State report for this agent will be disabled.")
self.heartbeat.stop()
return
except Exception:
LOG.warning("Failed sending agent report!") | Send the agent report via RPC. | entailment |
def _get_vlanid(self, context):
"""Returns vlan_id associated with a bound VLAN segment."""
segment = context.bottom_bound_segment
if segment and self.check_segment(segment):
return segment.get(api.SEGMENTATION_ID) | Returns vlan_id associated with a bound VLAN segment. | entailment |
def _get_physnet(self, context):
"""Returns physnet associated with a bound VLAN segment."""
segment = context.bottom_bound_segment
if segment and self.check_segment(segment):
return segment.get(api.PHYSICAL_NETWORK) | Returns physnet associated with a bound VLAN segment. | entailment |
def update_port_precommit(self, context):
"""Adds port profile and vlan information to the DB.
Assign a port profile to this port. To do that:
1. Get the vlan_id associated with the bound segment
2. Check if a port profile already exists for this vlan_id
3. If yes, associate that port profile with this port.
4. If no, create a new port profile with this vlan_id and
associate with this port
"""
vnic_type = context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
profile = context.current.get(bc.portbindings.PROFILE, {})
host_id = self._get_host_id(
context.current.get(bc.portbindings.HOST_ID))
if not host_id:
LOG.warning('Host id from port context is None. '
'Ignoring this port')
return
vlan_id = self._get_vlanid(context)
if not vlan_id:
LOG.warning('Vlan_id is None. Ignoring this port')
return
ucsm_ip = self.driver.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('Host %s is not controlled by any known '
'UCS Manager.', host_id)
return
if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
profile):
# This is a neutron virtio port.
# If VNIC templates are configured, that config would
# take precedence and the VLAN is added to the VNIC template.
physnet = self._get_physnet(context)
if not physnet:
LOG.debug('physnet is None. Not modifying VNIC '
'Template config')
else:
# Check if VNIC template is configured for this physnet
ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
vnic_template = ucsm.vnic_template_list.get(physnet)
if vnic_template:
LOG.debug('vnic_template %s', vnic_template)
self.ucsm_db.add_vnic_template(vlan_id, ucsm_ip,
vnic_template.name, physnet)
return
else:
LOG.debug('VNIC Template not configured for '
'physnet %s', physnet)
# In the absence of VNIC Templates, VLAN is directly added
# to vNIC(s) on the SP Template.
# Check if SP Template config has been provided. If so, find
# the UCSM that controls this host and the Service Profile
# Template for this host.
sp_template_info = (CONF.ml2_cisco_ucsm.ucsms[
ucsm_ip].sp_template_list.get(host_id))
if sp_template_info:
LOG.debug('SP Template: %s, VLAN_id: %d',
sp_template_info.name, vlan_id)
self.ucsm_db.add_service_profile_template(
vlan_id, sp_template_info.name, ucsm_ip)
return
# If this is an Intel SR-IOV vnic, then no need to create port
# profile on the UCS manager. So no need to update the DB.
if not self.driver.is_vmfex_port(profile):
LOG.debug('This is a SR-IOV port and hence not updating DB.')
return
# This is a Cisco VM-FEX port
p_profile_name = self.make_profile_name(vlan_id)
LOG.debug('Port Profile: %s for VLAN_id: %d', p_profile_name, vlan_id)
# Create a new port profile entry in the db
self.ucsm_db.add_port_profile(p_profile_name, vlan_id, ucsm_ip) | Adds port profile and vlan information to the DB.
Assign a port profile to this port. To do that:
1. Get the vlan_id associated with the bound segment
2. Check if a port profile already exists for this vlan_id
3. If yes, associate that port profile with this port.
4. If no, create a new port profile with this vlan_id and
associate with this port | entailment |
def update_port_postcommit(self, context):
"""Creates a port profile on UCS Manager.
Creates a Port Profile for this VLAN if it does not already
exist.
"""
vlan_id = self._get_vlanid(context)
if not vlan_id:
LOG.warning('Vlan_id is None. Ignoring this port.')
return
if (not self._is_supported_deviceowner(context.current) or
not self._is_status_active(context.current)):
LOG.debug("Unsupported device_owner '%(owner)s' or port not"
" active (vlan_id '%(vlan)d', status %(status)s)."
" Nothing to do.",
{'owner': context.current['device_owner'],
'vlan': vlan_id,
'status': context.current['status']})
return
# Checks to perform before UCS Manager can create a Port Profile.
# 1. Make sure this host is on a known UCS Manager.
host_id = self._get_host_id(
context.current.get(bc.portbindings.HOST_ID))
if not host_id:
LOG.warning('Host id from port context is None. '
'Ignoring this port')
return
ucsm_ip = self.driver.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('Host_id %s is not controlled by any known UCS '
'Manager', str(host_id))
return
profile = context.current.get(bc.portbindings.PROFILE, {})
vnic_type = context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
# 2. Make sure this is a vm_fex_port.(Port profiles are created
# only for VM-FEX ports.)
if (self.driver.check_vnic_type_and_vendor_info(vnic_type, profile) and
self.driver.is_vmfex_port(profile)):
# 3. Make sure update_port_precommit added an entry in the DB
# for this port profile
profile_name = self.ucsm_db.get_port_profile_for_vlan(vlan_id,
ucsm_ip)
# 4. Make sure that the Port Profile hasn't already been created
# on the UCS Manager
if profile_name and self.ucsm_db.is_port_profile_created(vlan_id,
ucsm_ip):
LOG.debug('Port Profile %s for vlan_id %d already exists '
'on UCSM %s.', profile_name, vlan_id, ucsm_ip)
return
# Multi VLAN trunk support
# Check if this network is a trunk network. If so pass the
# additional VLAN ids to the UCSM driver.
network = context.network.current['name']
trunk_vlans = (
CONF.sriov_multivlan_trunk.network_vlans.get(network, []))
# All checks are done. Ask the UCS Manager driver to create the
# above Port Profile.
if self.driver.create_portprofile(profile_name, vlan_id,
vnic_type, host_id, trunk_vlans):
# Port profile created on UCS, record that in the DB.
self.ucsm_db.set_port_profile_created(vlan_id, profile_name,
ucsm_ip)
return
else:
# Enable vlan-id for this Neutron virtual port.
LOG.debug('Host_id is %s', host_id)
physnet = self._get_physnet(context)
ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
vnic_template = ucsm.vnic_template_list.get(physnet)
if vnic_template:
LOG.debug('Update VNIC Template for physnet: %s', physnet)
LOG.debug('vnic_template %s', vnic_template)
if (self.driver.update_vnic_template(
host_id, vlan_id, physnet, vnic_template.path,
vnic_template.name)):
LOG.debug('Setting ucsm_updated flag for '
'vlan : %(vlan)d, '
'vnic_template : %(vnic_template)s '
'on ucsm_ip: %(ucsm_ip)s',
{'vlan': vlan_id,
'vnic_template': vnic_template.name,
'ucsm_ip': ucsm_ip})
self.ucsm_db.set_vnic_template_updated(
vlan_id, ucsm_ip, vnic_template.name, physnet)
return
if (CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list and
self.driver.update_service_profile_template(
vlan_id, host_id, ucsm_ip)):
sp_template_info = (CONF.ml2_cisco_ucsm.ucsms[
ucsm_ip].sp_template_list.get(host_id))
if not sp_template_info:
sp_template = None
else:
sp_template = sp_template_info.name
LOG.debug('Setting ucsm_updated flag for vlan : %(vlan)d, '
'sp_template : %(sp_template)s on ucsm_ip: '
'%(ucsm_ip)s', {'vlan': vlan_id,
'sp_template': sp_template, 'ucsm_ip': ucsm_ip})
self.ucsm_db.set_sp_template_updated(vlan_id, sp_template,
ucsm_ip)
else:
self.driver.update_serviceprofile(host_id, vlan_id) | Creates a port profile on UCS Manager.
Creates a Port Profile for this VLAN if it does not already
exist. | entailment |
def delete_network_precommit(self, context):
"""Delete entry corresponding to Network's VLAN in the DB."""
segments = context.network_segments
for segment in segments:
if not self.check_segment(segment):
return # Not a vlan network
vlan_id = segment.get(api.SEGMENTATION_ID)
if not vlan_id:
return # No vlan assigned to segment
# For VM-FEX ports
self.ucsm_db.delete_vlan_entry(vlan_id)
# For Neutron virtio ports
if any([True for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
if ucsm.sp_template_list]):
# At least on UCSM has sp templates configured
self.ucsm_db.delete_sp_template_for_vlan(vlan_id)
if any([True for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
if ucsm.vnic_template_list]):
# At least one UCSM has vnic templates configured
self.ucsm_db.delete_vnic_template_for_vlan(vlan_id) | Delete entry corresponding to Network's VLAN in the DB. | entailment |
def delete_network_postcommit(self, context):
"""Delete all configuration added to UCS Manager for the vlan_id."""
segments = context.network_segments
network_name = context.current['name']
for segment in segments:
if not self.check_segment(segment):
return # Not a vlan network
vlan_id = segment.get(api.SEGMENTATION_ID)
if not vlan_id:
return # No vlan assigned to segment
port_profile = self.make_profile_name(vlan_id)
trunk_vlans = (
CONF.sriov_multivlan_trunk.network_vlans.get(network_name, []))
self.driver.delete_all_config_for_vlan(vlan_id, port_profile,
trunk_vlans) | Delete all configuration added to UCS Manager for the vlan_id. | entailment |
def bind_port(self, context):
"""Binds port to current network segment.
Binds port only if the vnic_type is direct or macvtap and
the port is from a supported vendor. While binding port set it
in ACTIVE state and provide the Port Profile or Vlan Id as part
vif_details.
"""
vnic_type = context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
LOG.debug('Attempting to bind port %(port)s with vnic_type '
'%(vnic_type)s on network %(network)s ',
{'port': context.current['id'],
'vnic_type': vnic_type,
'network': context.network.current['id']})
profile = context.current.get(bc.portbindings.PROFILE, {})
if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
profile):
return
for segment in context.network.network_segments:
if self.check_segment(segment):
vlan_id = segment[api.SEGMENTATION_ID]
if not vlan_id:
LOG.warning('Cannot bind port: vlan_id is None.')
return
LOG.debug("Port binding to Vlan_id: %s", str(vlan_id))
# Check if this is a Cisco VM-FEX port or Intel SR_IOV port
if self.driver.is_vmfex_port(profile):
profile_name = self.make_profile_name(vlan_id)
self.vif_details[
const.VIF_DETAILS_PROFILEID] = profile_name
else:
self.vif_details[
bc.portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details,
bc.constants.PORT_STATUS_ACTIVE)
return
LOG.error('UCS Mech Driver: Failed binding port ID %(id)s '
'on any segment of network %(network)s',
{'id': context.current['id'],
'network': context.network.current['id']}) | Binds port to current network segment.
Binds port only if the vnic_type is direct or macvtap and
the port is from a supported vendor. While binding port set it
in ACTIVE state and provide the Port Profile or Vlan Id as part
vif_details. | entailment |
def update_rule_entry(self, rule_info):
"""Update the rule_info list."""
if rule_info.get('status') == 'up':
self.add_rule_entry(rule_info)
if rule_info.get('status') == 'down':
self.remove_rule_entry(rule_info) | Update the rule_info list. | entailment |
def add_rule_entry(self, rule_info):
"""Add host data object to the rule_info list."""
new_rule = IpMacPort(rule_info.get('ip'), rule_info.get('mac'),
rule_info.get('port'))
LOG.debug('Added rule info %s to the list', rule_info)
self.rule_info.append(new_rule) | Add host data object to the rule_info list. | entailment |
def remove_rule_entry(self, rule_info):
"""Remove host data object from rule_info list."""
temp_list = list(self.rule_info)
for rule in temp_list:
if (rule.ip == rule_info.get('ip') and
rule.mac == rule_info.get('mac') and
rule.port == rule_info.get('port')):
LOG.debug('Removed rule info %s from the list', rule_info)
self.rule_info.remove(rule) | Remove host data object from rule_info list. | entailment |
def _find_chain_name(self, mac):
"""Find a rule associated with a given mac."""
ipt_cmd = ['iptables', '-t', 'filter', '-S']
cmdo = dsl.execute(ipt_cmd, root_helper=self._root_helper,
log_output=False)
for o in cmdo.split('\n'):
if mac in o.lower():
chain = o.split()[1]
LOG.info('Find %(chain)s for %(mac)s.',
{'chain': chain, 'mac': mac})
return chain | Find a rule associated with a given mac. | entailment |
def _find_rule_no(self, mac):
"""Find rule number associated with a given mac."""
ipt_cmd = ['iptables', '-L', '--line-numbers']
cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False)
for o in cmdo.split('\n'):
if mac in o.lower():
rule_no = o.split()[0]
LOG.info('Found rule %(rule)s for %(mac)s.',
{'rule': rule_no, 'mac': mac})
return rule_no | Find rule number associated with a given mac. | entailment |
def update_ip_rule(self, ip, mac):
"""Update a rule associated with given ip and mac."""
rule_no = self._find_rule_no(mac)
chain = self._find_chain_name(mac)
if not rule_no or not chain:
LOG.error('Failed to update ip rule for %(ip)s %(mac)s',
{'ip': ip, 'mac': mac})
return
update_cmd = ['iptables', '-R', '%s' % chain, '%s' % rule_no,
'-s', '%s/32' % ip, '-m', 'mac', '--mac-source',
'%s' % mac, '-j', 'RETURN']
LOG.debug('Execute command: %s', update_cmd)
dsl.execute(update_cmd, self._root_helper, log_output=False) | Update a rule associated with given ip and mac. | entailment |
def enqueue_event(self, event):
"""Enqueue the given event.
The event contains host data (ip, mac, port) which will be used to
update the spoofing rule for the host in the iptables.
"""
LOG.debug('Enqueue iptable event %s.', event)
if event.get('status') == 'up':
for rule in self.rule_info:
if (rule.mac == event.get('mac').lower() and
rule.port == event.get('port')):
# Entry already exist in the list.
if rule.ip != event.get('ip'):
LOG.debug('enqueue_event: Only updating IP from %s'
' to %s.' % (rule.ip, event.get('ip')))
# Only update the IP address if it is different.
rule.ip = event.get('ip')
return
self._iptq.put(event) | Enqueue the given event.
The event contains host data (ip, mac, port) which will be used to
update the spoofing rule for the host in the iptables. | entailment |
def update_iptables(self):
"""Update iptables based on information in the rule_info."""
# Read the iptables
iptables_cmds = ['iptables-save', '-c']
all_rules = dsl.execute(iptables_cmds, root_helper=self._root_helper,
log_output=False)
# For each rule in rule_info update the rule if necessary.
new_rules = []
is_modified = False
for line in all_rules.split('\n'):
new_line = line
line_content = line.split()
# The spoofing rule which includes mac and ip should have
# -s cidr/32 option for ip address. Otherwise no rule
# will be modified.
if '-s' in line_content:
tmp_rule_info = list(self.rule_info)
for rule in tmp_rule_info:
if (rule.mac in line.lower() and
rule.chain.lower() in line.lower() and
not self._is_ip_in_rule(rule.ip, line_content)):
ip_loc = line_content.index('-s') + 1
line_content[ip_loc] = rule.ip + '/32'
new_line = ' '.join(line_content)
LOG.debug('Modified %(old_rule)s. '
'New rule is %(new_rule)s.' % (
{'old_rule': line,
'new_rule': new_line}))
is_modified = True
new_rules.append(new_line)
if is_modified and new_rules:
# Updated all the rules. Now commit the new rules.
iptables_cmds = ['iptables-restore', '-c']
dsl.execute(iptables_cmds, process_input='\n'.join(new_rules),
root_helper=self._root_helper, log_output=False) | Update iptables based on information in the rule_info. | entailment |
def process_rule_info(self):
"""Task responsible for processing event queue."""
while True:
try:
event = self._iptq.get(block=False)
LOG.debug('Dequeue event: %s.', event)
self.update_rule_entry(event)
except queue.Empty:
self.update_iptables()
time.sleep(1)
except Exception:
LOG.exception('ERROR: failed to process queue') | Task responsible for processing event queue. | entailment |
def setup(self, **kwargs):
"""setup ASA context for an edge tenant pair. """
params = kwargs.get('params')
LOG.info("asa_setup: tenant %(tenant)s %(in_vlan)d %(out_vlan)d"
" %(in_ip)s %(in_mask)s %(out_ip)s %(out_mask)s",
{'tenant': params.get('tenant_name'),
'in_vlan': params.get('in_vlan'),
'out_vlan': params.get('out_vlan'),
'in_ip': params.get('in_ip'),
'in_mask': params.get('in_mask'),
'out_ip': params.get('out_ip'),
'out_mask': params.get('out_mask')})
inside_vlan = str(params.get('in_vlan'))
outside_vlan = str(params.get('out_vlan'))
context = params.get('tenant_name')
cmds = ["conf t", "changeto system"]
inside_int = params.get('intf_in') + '.' + inside_vlan
cmds.append("int " + inside_int)
cmds.append("vlan " + inside_vlan)
outside_int = params.get('intf_out') + '.' + outside_vlan
cmds.append("int " + outside_int)
cmds.append("vlan " + outside_vlan)
cmds.append("context " + context)
cmds.append("allocate-interface " + inside_int)
cmds.append("allocate-interface " + outside_int)
cmds.append("config-url disk0:/" + context + ".cfg")
cmds.append("write memory")
cmds.append("changeto context " + context)
cmds.append("int " + inside_int)
cmds.append("nameif Inside")
cmds.append("security-level 100")
cmds.append(
"ip address " + params.get('in_ip') + " " + params.get('in_mask'))
cmds.append("int " + outside_int)
cmds.append("nameif Outside")
cmds.append("security-level 0")
cmds.append("ip address " + params.get('out_ip') + " " +
params.get('out_mask'))
cmds.append("router ospf 1")
cmds.append("network " + params.get('in_ip') + " " +
params.get('in_mask') + " area 0")
cmds.append("network " + params.get('out_ip') + " " +
params.get('out_mask') + " area 0")
cmds.append("area 0")
cmds.append("route Outside 0.0.0.0 0.0.0.0 " + params.get('out_gw') +
" 1")
cmds.append("route Outside 0.0.0.0 0.0.0.0 " +
params.get('out_sec_gw') + " 1")
cmds.append("end")
cmds.append("write memory")
if context not in self.tenant_rule:
self.tenant_rule[context] = dict()
self.tenant_rule[context]['rule_lst'] = []
data = {"commands": cmds}
return self.rest_send_cli(data) | setup ASA context for an edge tenant pair. | entailment |
def cleanup(self, **kwargs):
"""cleanup ASA context for an edge tenant pair. """
params = kwargs.get('params')
LOG.info("asa_cleanup: tenant %(tenant)s %(in_vlan)d %(out_vlan)d"
" %(in_ip)s %(in_mask)s %(out_ip)s %(out_mask)s",
{'tenant': params.get('tenant_name'),
'in_vlan': params.get('in_vlan'),
'out_vlan': params.get('out_vlan'),
'in_ip': params.get('in_ip'),
'in_mask': params.get('in_mask'),
'out_ip': params.get('out_ip'),
'out_mask': params.get('out_mask')})
inside_vlan = str(params.get('in_vlan'))
outside_vlan = str(params.get('out_vlan'))
context = params.get('tenant_name')
cmds = ["conf t", "changeto system"]
cmds.append("no context " + context + " noconfirm")
inside_int = params.get('intf_in') + '.' + inside_vlan
outside_int = params.get('intf_out') + '.' + outside_vlan
cmds.append("no interface " + inside_int)
cmds.append("no interface " + outside_int)
cmds.append("write memory")
cmds.append("del /noconfirm disk0:/" + context + ".cfg")
if context in self.tenant_rule:
for rule in self.tenant_rule[context].get('rule_lst'):
del self.rule_tbl[rule]
del self.tenant_rule[context]
data = {"commands": cmds}
return self.rest_send_cli(data) | cleanup ASA context for an edge tenant pair. | entailment |
def build_acl_ip(self, network_obj):
"Build the acl for IP address. "
if str(network_obj) == '0.0.0.0/0':
acl = "any "
else:
acl = "%(ip)s %(mask)s " % {'ip': network_obj.network,
'mask': network_obj.netmask}
return acl | Build the acl for IP address. | entailment |
def build_acl_port(self, port, enabled=True):
"Build the acl for L4 Ports. "
if port is not None:
if ':' in port:
range = port.replace(':', ' ')
acl = "range %(range)s " % {'range': range}
else:
acl = "eq %(port)s " % {'port': port}
if not enabled:
acl += "inactive"
return acl | Build the acl for L4 Ports. | entailment |
def build_acl(self, tenant_name, rule):
"""Build the ACL. """
# TODO(padkrish) actions that is not deny or allow, throw error
if rule['action'] == 'allow':
action = 'permit'
else:
action = 'deny'
acl_str = "access-list %(tenant)s extended %(action)s %(prot)s "
acl = acl_str % {'tenant': tenant_name, 'action': action,
'prot': rule.get('protocol')}
src_ip = self.get_ip_address(rule.get('source_ip_address'))
ip_acl = self.build_acl_ip(src_ip)
acl += ip_acl
acl += self.build_acl_port(rule.get('source_port'))
dst_ip = self.get_ip_address(rule.get('destination_ip_address'))
ip_acl = self.build_acl_ip(dst_ip)
acl += ip_acl
acl += self.build_acl_port(rule.get('destination_port'),
enabled=rule.get('enabled'))
return acl | Build the ACL. | entailment |
def apply_policy(self, policy):
"""Apply a firewall policy. """
tenant_name = policy['tenant_name']
fw_id = policy['fw_id']
fw_name = policy['fw_name']
LOG.info("asa_apply_policy: tenant=%(tenant)s fw_id=%(fw_id)s "
"fw_name=%(fw_name)s",
{'tenant': tenant_name, 'fw_id': fw_id, 'fw_name': fw_name})
cmds = ["conf t", "changeto context " + tenant_name]
for rule_id, rule in policy['rules'].items():
acl = self.build_acl(tenant_name, rule)
LOG.info("rule[%(rule_id)s]: name=%(name)s enabled=%(enabled)s"
" protocol=%(protocol)s dport=%(dport)s "
"sport=%(sport)s dip=%(dport)s "
"sip=%(sip)s action=%(dip)s",
{'rule_id': rule_id, 'name': rule.get('name'),
'enabled': rule.get('enabled'),
'protocol': rule.get('protocol'),
'dport': rule.get('dst_port'),
'sport': rule.get('src_port'),
'dip': rule.get('destination_ip_address'),
'sip': rule.get('source_ip_address'),
'action': rule.get('action')})
# remove the old ace for this rule
if rule_id in self.rule_tbl:
cmds.append('no ' + self.rule_tbl[rule_id])
self.rule_tbl[rule_id] = acl
if tenant_name in self.tenant_rule:
if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:
self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)
cmds.append(acl)
cmds.append("access-group " + tenant_name + " global")
cmds.append("write memory")
LOG.info("cmds sent is %s", cmds)
data = {"commands": cmds}
return self.rest_send_cli(data) | Apply a firewall policy. | entailment |
def _flow_check_handler_internal(self):
"""Periodic handler to check if installed flows are present.
This handler runs periodically to check if installed flows are present.
This function cannot detect and delete the stale flows, if present.
It requires more complexity to delete stale flows. Generally, stale
flows are not present. So, that logic is not put here.
"""
integ_flow = self.integ_br_obj.dump_flows_for(
in_port=self.int_peer_port_num)
ext_flow = self.ext_br_obj.dump_flows_for(
in_port=self.phy_peer_port_num)
for net_uuid, lvm in six.iteritems(self.local_vlan_map):
vdp_vlan = lvm.any_consistent_vlan()
flow_required = False
if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
return
if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
"%(lvid)s not present on Integ bridge",
{'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
flow_required = True
if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
"%(lvid)s not present on External bridge",
{'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
flow_required = True
if flow_required:
LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
" %(vdp)s",
{'lvid': lvm.lvid, 'vdp': vdp_vlan})
self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan) | Periodic handler to check if installed flows are present.
This handler runs periodically to check if installed flows are present.
This function cannot detect and delete the stale flows, if present.
It requires more complexity to delete stale flows. Generally, stale
flows are not present. So, that logic is not put here. | entailment |
def _flow_check_handler(self):
"""Top level routine to check OVS flow consistency. """
LOG.info("In _flow_check_handler")
try:
with self.ovs_vdp_lock:
self._flow_check_handler_internal()
except Exception as e:
LOG.error("Exception in _flow_check_handler_internal %s",
str(e)) | Top level routine to check OVS flow consistency. | entailment |
def gen_veth_str(self, const_str, intf_str):
"""Generate a veth string.
Concatenates the constant string with remaining available length
of interface string from trailing position.
"""
avl_len = constants.MAX_VETH_NAME - len(const_str)
if avl_len <= 0:
LOG.error("veth string name too short")
raise dfae.DfaAgentFailed(reason="Veth Unavailable")
start_pos = len(intf_str) - avl_len
veth_str = const_str + intf_str[start_pos:]
return veth_str | Generate a veth string.
Concatenates the constant string with remaining available length
of interface string from trailing position. | entailment |
def setup_lldpad_ports(self):
"""Setup the flows for passing LLDP/VDP frames in OVS. """
# Creating the physical bridge and setting up patch ports is done by
# OpenStack
ovs_bridges = ovs_lib.get_bridges(self.root_helper)
if self.ext_br not in ovs_bridges or self.integ_br not in ovs_bridges:
self.uplink_fail_reason = cconstants.bridge_not_cfgd_reason % (
ovs_bridges, self.integ_br, self.ext_br)
LOG.error("%s", self.uplink_fail_reason)
raise dfae.DfaAgentFailed(reason=self.uplink_fail_reason)
br = ovs_lib.OVSBridge(self.ext_br, root_helper=self.root_helper)
self.ext_br_obj = br
int_br = ovs_lib.OVSBridge(self.integ_br, root_helper=self.root_helper)
self.integ_br_obj = int_br
self.phy_peer_port, self.int_peer_port = self.find_interconnect_ports()
if self.phy_peer_port is None or self.int_peer_port is None:
self.uplink_fail_reason = cconstants.veth_not_cfgd_reason % (
self.phy_peer_port, self.int_peer_port)
LOG.error("%s", self.uplink_fail_reason)
raise dfae.DfaAgentFailed(reason=self.uplink_fail_reason)
lldp_ovs_veth_str = constants.LLDPAD_OVS_VETH_PORT + self.uplink
if len(lldp_ovs_veth_str) > constants.MAX_VETH_NAME:
lldp_ovs_veth_str = self.gen_veth_str(
constants.LLDPAD_OVS_VETH_PORT,
self.uplink)
lldp_loc_veth_str = constants.LLDPAD_LOC_VETH_PORT + self.uplink
if len(lldp_loc_veth_str) > constants.MAX_VETH_NAME:
lldp_loc_veth_str = self.gen_veth_str(
constants.LLDPAD_LOC_VETH_PORT,
self.uplink)
ip_wrapper = ip_lib.IPWrapper()
self.delete_vdp_flows()
br.delete_port(lldp_ovs_veth_str)
if ip_lib.device_exists(lldp_ovs_veth_str):
# What about OVS restart cases fixme(padkrish)
# IMPORTANT.. The link delete should be done only for non-restart
# cases. Otherwise, The MAC address of the veth interface changes
# for every delete/create. So, if lldpad has the association sent
# already, retriggering it will make the ASSOC appear as coming
# from another station and more than one VSI instance will appear
# at the Leaf. Deleting the assoc and creating the assoc for new
# veth is not optimal. fixme(padkrish)
# ip_lib.IPDevice(lldp_ovs_veth_str,self.root_helper).link.delete()
lldp_loc_veth = ip_wrapper.device(lldp_loc_veth_str)
lldp_ovs_veth = ip_wrapper.device(lldp_ovs_veth_str)
else:
# fixme(padkrish) Due to above reason, do the vethcreate below only
# if it doesn't exist and not deleted.
lldp_loc_veth, lldp_ovs_veth = (
ip_wrapper.add_veth(lldp_loc_veth_str,
lldp_ovs_veth_str))
if not br.port_exists(self.uplink):
phy_port_num = br.add_port(self.uplink)
else:
phy_port_num = br.get_port_ofport(self.uplink)
if phy_port_num == cconstants.INVALID_OFPORT:
self.uplink_fail_reason = cconstants.invalid_uplink_ofport_reason
LOG.error("%s", self.uplink_fail_reason)
return False
if not br.port_exists(lldp_ovs_veth_str):
lldp_ovs_portnum = br.add_port(lldp_ovs_veth)
else:
lldp_ovs_portnum = br.get_port_ofport(lldp_ovs_veth)
if lldp_ovs_portnum == cconstants.INVALID_OFPORT:
self.uplink_fail_reason = cconstants.lldp_ofport_not_detect_reason
LOG.error("%s", self.uplink_fail_reason)
return False
lldp_loc_veth.link.set_up()
lldp_ovs_veth.link.set_up()
# What about OVS restart cases fixme(padkrish)
self.program_vdp_flows(lldp_ovs_portnum, phy_port_num)
self.phy_peer_port_num = br.get_port_ofport(self.phy_peer_port)
self.int_peer_port_num = int_br.get_port_ofport(self.int_peer_port)
if (self.phy_peer_port_num == cconstants.INVALID_OFPORT or
self.int_peer_port_num == cconstants.INVALID_OFPORT):
self.uplink_fail_reason = cconstants.invalid_peer_ofport_reason % (
self.phy_peer_port_num, self.int_peer_port_num)
LOG.error("%s", self.uplink_fail_reason)
return False
self.lldpad_info = (lldpad.LldpadDriver(lldp_loc_veth_str, self.uplink,
self.root_helper))
ret = self.lldpad_info.enable_evb()
if not ret:
self.uplink_fail_reason = cconstants.evb_cfg_fail_reason
LOG.error("%s", self.uplink_fail_reason)
return False
self.lldp_local_veth_port = lldp_loc_veth_str
self.lldp_ovs_veth_port = lldp_ovs_veth_str
LOG.info("Setting up lldpad ports complete")
return True | Setup the flows for passing LLDP/VDP frames in OVS. | entailment |
def find_interconnect_ports(self):
"""Find the internal veth or patch ports. """
phy_port_list = self.ext_br_obj.get_port_name_list()
int_port_list = self.integ_br_obj.get_port_name_list()
for port in phy_port_list:
# Use get Interface xxx type
is_patch = ovs_lib.is_patch(self.root_helper, port)
if is_patch:
# Get the peer for this patch
peer_port = ovs_lib.get_peer(self.root_helper, port)
if peer_port in int_port_list:
return port, peer_port
# A solution is needed for veth pairs also, fixme(padkrish)
# ip_wrapper.get_devices() returns all the devices
# Pick the ones whose type is veth (?) and get the other pair
# Combination of "ethtool -S xxx" command and "ip tool" command.
return None, None | Find the internal veth or patch ports. | entailment |
def send_vdp_port_event_internal(self, port_uuid, mac, net_uuid,
segmentation_id, status, oui):
"""Send vNIC UP/Down event to VDP.
:param port_uuid: a ovslib.VifPort object.
:mac: MAC address of the VNIC
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down'
:oui: OUI Parameters
"""
lldpad_port = self.lldpad_info
if not lldpad_port:
fail_reason = "There is no LLDPad port available."
LOG.error("%s", fail_reason)
return {'result': False, 'fail_reason': fail_reason}
if status == 'up':
if self.vdp_mode == constants.VDP_SEGMENT_MODE:
port_name = self.ext_br_obj.get_ofport_name(port_uuid)
if port_name is None:
fail_reason = "Unknown portname for uuid %s" % (port_uuid)
LOG.error("%s", fail_reason)
return {'result': False, 'fail_reason': fail_reason}
LOG.info("Status up: portname for uuid %(uuid)s is %(port)s",
{'uuid': port_uuid, 'port': port_name})
ret = self.port_up_segment_mode(lldpad_port, port_name,
port_uuid, mac, net_uuid,
segmentation_id, oui)
else:
if self.vdp_mode == constants.VDP_SEGMENT_MODE:
LOG.info("Status down for portname uuid %s", port_uuid)
ret = self.port_down_segment_mode(lldpad_port, port_uuid,
mac, net_uuid,
segmentation_id, oui)
return ret | Send vNIC UP/Down event to VDP.
:param port_uuid: a ovslib.VifPort object.
:mac: MAC address of the VNIC
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down'
:oui: OUI Parameters | entailment |
def send_vdp_port_event(self, port_uuid, mac, net_uuid,
segmentation_id, status, oui):
"""Send vNIC UP/Down event to VDP.
:param port: a ovslib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down'
"""
try:
with self.ovs_vdp_lock:
ret = self.send_vdp_port_event_internal(port_uuid, mac,
net_uuid,
segmentation_id,
status, oui)
return ret
except Exception as e:
LOG.error("Exception in send_vdp_port_event %s" % str(e))
return {'result': False, 'fail_reason': str(e)} | Send vNIC UP/Down event to VDP.
:param port: a ovslib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down' | entailment |
def get_lvid_vdp_vlan(self, net_uuid, port_uuid):
"""Retrieve the Local Vlan ID and VDP Vlan. """
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
LOG.error("lvm not yet created, get_lvid_vdp_lan "
"return error")
return cconstants.INVALID_VLAN, cconstants.INVALID_VLAN
vdp_vlan = lvm.get_portid_vlan(port_uuid)
lvid = lvm.lvid
LOG.info("Return from lvid_vdp_vlan lvid %(lvid)s vdp_vlan %(vdp)s",
{'lvid': lvid, 'vdp': vdp_vlan})
return lvid, vdp_vlan | Retrieve the Local Vlan ID and VDP Vlan. | entailment |
def unprovision_vdp_overlay_networks(self, net_uuid, lvid, vdp_vlan, oui):
"""Unprovisions a overlay type network configured using VDP.
:param net_uuid: the uuid of the network associated with this vlan.
:lvid: Local VLAN ID
:vdp_vlan: VDP VLAN ID
:oui: OUI Parameters
"""
# check validity
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
LOG.error("Cannot unprovision VDP Overlay network for"
" net-id=%(net_uuid)s - Invalid ",
{'net_uuid': net_uuid})
return
LOG.info('unprovision_vdp_overlay_networks: add_flow for '
'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s',
{'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
self.program_vm_ovs_flows(lvid, vdp_vlan, 0) | Unprovisions a overlay type network configured using VDP.
:param net_uuid: the uuid of the network associated with this vlan.
:lvid: Local VLAN ID
:vdp_vlan: VDP VLAN ID
:oui: OUI Parameters | entailment |
def vdp_vlan_change_internal(self, vsw_cb_data, vdp_vlan, fail_reason):
"""Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads.
"""
LOG.debug("In VDP VLAN change VLAN %s", vdp_vlan)
if not vsw_cb_data:
LOG.error("NULL vsw_cb_data Info received")
return
net_uuid = vsw_cb_data.get('net_uuid')
port_uuid = vsw_cb_data.get('port_uuid')
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
LOG.error("Network %s is not in the local vlan map", net_uuid)
return
lldpad_port = self.lldpad_info
if not lldpad_port:
LOG.error("There is no LLDPad port available.")
return
exist_vdp_vlan = lvm.late_binding_vlan
lvid = lvm.vlan
LOG.debug("lvid %(lvid)s exist %(vlan)s",
{'lvid': lvid, 'vlan': exist_vdp_vlan})
lvm.decr_reset_vlan(port_uuid, vdp_vlan)
lvm.set_fail_reason(port_uuid, fail_reason)
self.vdp_vlan_cb(port_uuid, lvid, vdp_vlan, fail_reason)
if vdp_vlan == exist_vdp_vlan:
LOG.debug("No change in provider VLAN %s", vdp_vlan)
return
# Logic is if the VLAN changed to 0, clear the flows only if none of
# the VM's in the network has a valid VLAN.
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
if ovs_lib.is_valid_vlan_tag(exist_vdp_vlan) and not (
lvm.any_valid_vlan()):
# Clear the old flows
LOG.debug("Clearing flows, no valid vlans")
self.program_vm_ovs_flows(lvid, exist_vdp_vlan, 0)
lvm.late_binding_vlan = 0
lvm.vdp_nego_req = False
else:
# If any VM gets a VLAN change, we immediately modify the flow.
# This is done to not wait for all VM's VLAN getting updated from
# switch. Logic is if any VM gts a new VLAN, the other VM's of the
# same network will be updated eventually.
if vdp_vlan != exist_vdp_vlan and (
ovs_lib.is_valid_vlan_tag(vdp_vlan)):
# Add the new flows and remove the old flows
LOG.warning("Non Zero VDP Vlan change %s %s" %
(vdp_vlan, exist_vdp_vlan))
self.program_vm_ovs_flows(lvid, exist_vdp_vlan, vdp_vlan)
lvm.late_binding_vlan = vdp_vlan
lvm.vdp_nego_req = False
else:
LOG.error("Invalid or same VLAN Exist %(exist)s "
"New %(new)s VLANs",
{'exist': exist_vdp_vlan, 'new': vdp_vlan}) | Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads. | entailment |
def vdp_vlan_change(self, vsw_cb_data, vdp_vlan, fail_reason):
"""Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads.
"""
LOG.debug("In VDP VLAN change VLAN %s" % vdp_vlan)
try:
with self.ovs_vdp_lock:
self.vdp_vlan_change_internal(vsw_cb_data, vdp_vlan,
fail_reason)
except Exception as e:
LOG.error("Exception in vdp_vlan_change %s" % str(e)) | Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads. | entailment |
def provision_vdp_overlay_networks(self, port_uuid, mac, net_uuid,
segmentation_id, lvid, oui):
"""Provisions a overlay type network configured using VDP.
:param port_uuid: the uuid of the VM port.
:param mac: the MAC address of the VM.
:param net_uuid: the uuid of the network associated with this vlan.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:lvid: Local VLAN ID
:oui: OUI Parameters
"""
lldpad_port = self.lldpad_info
if lldpad_port:
ovs_cb_data = {'obj': self, 'port_uuid': port_uuid, 'mac': mac,
'net_uuid': net_uuid}
vdp_vlan, fail_reason = lldpad_port.send_vdp_vnic_up(
port_uuid=port_uuid, vsiid=port_uuid, gid=segmentation_id,
mac=mac, new_network=True, oui=oui,
vsw_cb_fn=self.vdp_vlan_change, vsw_cb_data=ovs_cb_data)
else:
fail_reason = "There is no LLDPad port available."
LOG.error("%s", fail_reason)
return {'result': False, 'vdp_vlan': cconstants.INVALID_VLAN,
'fail_reason': fail_reason}
# check validity
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
LOG.error("Cannot provision VDP Overlay network for"
" net-id=%(net_uuid)s - Invalid ",
{'net_uuid': net_uuid})
return {'result': True, 'vdp_vlan': cconstants.INVALID_VLAN,
'fail_reason': fail_reason}
LOG.info('provision_vdp_overlay_networks: add_flow for '
'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s',
{'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
self.program_vm_ovs_flows(lvid, 0, vdp_vlan)
return {'result': True, 'vdp_vlan': vdp_vlan, 'fail_reason': None} | Provisions a overlay type network configured using VDP.
:param port_uuid: the uuid of the VM port.
:param mac: the MAC address of the VM.
:param net_uuid: the uuid of the network associated with this vlan.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:lvid: Local VLAN ID
:oui: OUI Parameters | entailment |
def pop_local_cache(self, port_uuid, mac, net_uuid, lvid, vdp_vlan,
segmentation_id):
"""Populate the local cache after restart. """
LOG.info("Populating the OVS VDP cache with port %(port_uuid)s, "
"mac %(mac)s net %(net_uuid)s lvid %(lvid)s vdpvlan "
"%(vdp_vlan)s seg %(seg)s",
{'port_uuid': port_uuid, 'mac': mac, 'net_uuid': net_uuid,
'lvid': lvid, 'vdp_vlan': vdp_vlan, 'seg': segmentation_id})
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
lvm = LocalVlan(lvid, segmentation_id)
self.local_vlan_map[net_uuid] = lvm
lvm.lvid = lvid
lvm.set_port_uuid(port_uuid, vdp_vlan, None)
if vdp_vlan != cconstants.INVALID_VLAN:
lvm.late_binding_vlan = vdp_vlan
lvm.vdp_nego_req = False | Populate the local cache after restart. | entailment |
def store_dummy_router_net(self, net_id, subnet_id, rtr_id):
"""Storing the router attributes. """
self.dummy_net_id = net_id
self.dummy_subnet_id = subnet_id
self.dummy_router_id = rtr_id | Storing the router attributes. | entailment |
def store_dcnm_net_dict(self, net_dict, direc):
"""Storing the DCNM net dict. """
if direc == 'in':
self.in_dcnm_net_dict = net_dict
else:
self.out_dcnm_net_dict = net_dict | Storing the DCNM net dict. | entailment |
def _parse_subnet(self, subnet_dict):
"""Return the subnet, start, end, gateway of a subnet. """
if not subnet_dict:
return
alloc_pool = subnet_dict.get('allocation_pools')
cidr = subnet_dict.get('cidr')
subnet = cidr.split('/')[0]
start = alloc_pool[0].get('start')
end = alloc_pool[0].get('end')
gateway = subnet_dict.get('gateway_ip')
sec_gateway = subnet_dict.get('secondary_gw')
return {'subnet': subnet, 'start': start, 'end': end,
'gateway': gateway, 'sec_gateway': sec_gateway} | Return the subnet, start, end, gateway of a subnet. | entailment |
def store_dcnm_subnet_dict(self, subnet_dict, direc):
"""Store the subnet attributes and dict. """
if direc == 'in':
self.in_dcnm_subnet_dict = subnet_dict
self.in_subnet_dict = self._parse_subnet(subnet_dict)
else:
self.out_dcnm_subnet_dict = subnet_dict
self.out_subnet_dict = self._parse_subnet(subnet_dict) | Store the subnet attributes and dict. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.