code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if remote_port != self.remote_port:
self.remote_port = remote_port
return True
return False | def remote_port_uneq_store(self, remote_port) | This function saves the port, if different from stored. | 2.603203 | 2.192528 | 1.187306 |
if remote_chassis_id_mac != self.remote_chassis_id_mac:
self.remote_chassis_id_mac = remote_chassis_id_mac
return True
return False | def remote_chassis_id_mac_uneq_store(self, remote_chassis_id_mac) | This function saves the Chassis MAC, if different from stored. | 1.755766 | 1.652559 | 1.062452 |
if remote_port_id_mac != self.remote_port_id_mac:
self.remote_port_id_mac = remote_port_id_mac
return True
return False | def remote_port_id_mac_uneq_store(self, remote_port_id_mac) | This function saves the port MAC, if different from stored. | 1.87009 | 1.696668 | 1.102214 |
if intf not in cls.topo_intf_obj_dict:
LOG.error("Interface %s not configured at all", intf)
return False
intf_obj = cls.topo_intf_obj_dict.get(intf)
return intf_obj.get_lldp_status() | def get_lldp_status(cls, intf) | Retrieves the LLDP status. | 3.133894 | 2.994461 | 1.046564 |
if not all_intf:
self.intf_list = intf_list
else:
self.intf_list = sys_utils.get_all_run_phy_intf()
self.cb = cb
self.intf_attr = {}
self.cfg_lldp_interface_list(self.intf_list) | def _init_cfg_interfaces(self, cb, intf_list=None, all_intf=True) | Configure the interfaces during init time. | 4.631412 | 4.337686 | 1.067715 |
self.intf_list.append(protocol_interface)
self.cfg_lldp_interface(protocol_interface, phy_interface) | def cfg_intf(self, protocol_interface, phy_interface=None) | Called by application to add an interface to the list. | 4.42217 | 3.55864 | 1.242657 |
self.intf_attr[protocol_interface] = TopoIntfAttr(
protocol_interface, phy_interface)
self.store_obj(protocol_interface, self.intf_attr[protocol_interface]) | def create_attr_obj(self, protocol_interface, phy_interface) | Creates the local interface attribute object and stores it. | 4.468397 | 3.981828 | 1.122197 |
flag = False
attr_obj = self.get_attr_obj(intf)
remote_evb_mode = self.pub_lldp.get_remote_evb_mode(tlv_data)
if attr_obj.remote_evb_mode_uneq_store(remote_evb_mode):
flag = True
remote_evb_cfgd = self.pub_lldp.get_remote_evb_cfgd(tlv_data)
if attr_obj.remote_evb_cfgd_uneq_store(remote_evb_cfgd):
flag = True
remote_mgmt_addr = self.pub_lldp.get_remote_mgmt_addr(tlv_data)
if attr_obj.remote_mgmt_addr_uneq_store(remote_mgmt_addr):
flag = True
remote_sys_desc = self.pub_lldp.get_remote_sys_desc(tlv_data)
if attr_obj.remote_sys_desc_uneq_store(remote_sys_desc):
flag = True
remote_sys_name = self.pub_lldp.get_remote_sys_name(tlv_data)
if attr_obj.remote_sys_name_uneq_store(remote_sys_name):
flag = True
remote_port = self.pub_lldp.get_remote_port(tlv_data)
if attr_obj.remote_port_uneq_store(remote_port):
flag = True
remote_chassis_id_mac = self.pub_lldp.\
get_remote_chassis_id_mac(tlv_data)
if attr_obj.remote_chassis_id_mac_uneq_store(remote_chassis_id_mac):
flag = True
remote_port_id_mac = self.pub_lldp.get_remote_port_id_mac(tlv_data)
if attr_obj.remote_port_id_mac_uneq_store(remote_port_id_mac):
flag = True
return flag | def cmp_store_tlv_params(self, intf, tlv_data) | Compare and store the received TLV.
Compares the received TLV with stored TLV. Store the new TLV if it is
different. | 1.563102 | 1.570504 | 0.995287 |
if phy_interface is None:
phy_interface = protocol_interface
self.create_attr_obj(protocol_interface, phy_interface)
ret = self.pub_lldp.enable_lldp(protocol_interface)
attr_obj = self.get_attr_obj(protocol_interface)
attr_obj.update_lldp_status(ret) | def cfg_lldp_interface(self, protocol_interface, phy_interface=None) | Cfg LLDP on interface and create object. | 3.601166 | 3.216765 | 1.119499 |
try:
self._periodic_task_int()
except Exception as exc:
LOG.error("Exception caught in periodic discovery task %s",
str(exc)) | def periodic_discovery_task(self) | Periodic task that checks the interface TLV attributes. | 6.113123 | 5.35688 | 1.141172 |
bond_phy = sys_utils.get_bond_intf(phy_interface)
if sys_utils.is_intf_bond(phy_interface):
bond_intf = phy_interface
else:
bond_intf = bond_phy
# This can be an addition or removal of the interface to a bond.
bond_intf_change = attr_obj.cmp_update_bond_intf(bond_intf)
return bond_intf_change | def _check_bond_interface_change(self, phy_interface, attr_obj) | Check if there's any change in bond interface.
First check if the interface passed itself is a bond-interface and then
retrieve the member list and compare.
Next, check if the interface passed is a part of the bond interface and
then retrieve the member list and compare. | 4.515141 | 4.816975 | 0.93734 |
for intf in self.intf_list:
attr_obj = self.get_attr_obj(intf)
status = attr_obj.get_lldp_status()
if not status:
ret = self.pub_lldp.enable_lldp(intf)
attr_obj.update_lldp_status(ret)
continue
bond_intf_change = self._check_bond_interface_change(
attr_obj.get_phy_interface(), attr_obj)
tlv_data = self.pub_lldp.get_lldp_tlv(intf)
# This should take care of storing the information of interest
if self.cmp_store_tlv_params(intf, tlv_data) or (
attr_obj.get_db_retry_status() or bond_intf_change or (
attr_obj.get_topo_disc_send_cnt() > (
constants.TOPO_DISC_SEND_THRESHOLD))):
# Passing the interface attribute object to CB
ret = self.cb(intf, attr_obj)
status = not ret
attr_obj.store_db_retry_status(status)
attr_obj.reset_topo_disc_send_cnt()
else:
attr_obj.incr_topo_disc_send_cnt() | def _periodic_task_int(self) | Internal periodic discovery task routine to check TLV attributes.
This routine retrieves the LLDP TLC's on all its configured interfaces.
If the retrieved TLC is different than the stored TLV, it invokes the
callback. | 5.07933 | 4.828332 | 1.051985 |
if mgmt_ip not in self.credentials:
return None
security_data = self.credentials[mgmt_ip]
verify = security_data[const.HTTPS_CERT_TUPLE]
if not verify:
verify = security_data[const.HTTPS_VERIFY_TUPLE]
if not refresh and security_data[const.COOKIE_TUPLE]:
return security_data[const.COOKIE_TUPLE], verify
payload = {"aaaUser": {"attributes": {
"name": security_data[const.UNAME_TUPLE],
"pwd": security_data[const.PW_TUPLE]}}}
headers = {"Content-type": "application/json", "Accept": "text/plain"}
url = "{0}://{1}/api/aaaLogin.json".format(DEFAULT_SCHEME, mgmt_ip)
try:
response = self.session.request('POST',
url,
data=jsonutils.dumps(payload),
headers=headers,
verify=verify,
timeout=self.timeout * 2)
except Exception as e:
raise cexc.NexusConnectFailed(nexus_host=mgmt_ip,
exc=e)
self.status = response.status_code
if response.status_code == requests.codes.OK:
cookie = response.headers.get('Set-Cookie')
security_data = (
security_data[const.UNAME_TUPLE:const.COOKIE_TUPLE] +
(cookie,))
self.credentials[mgmt_ip] = security_data
return cookie, verify
else:
e = "REST API connect returned Error code: "
e += str(self.status)
raise cexc.NexusConnectFailed(nexus_host=mgmt_ip,
exc=e) | def _get_cookie(self, mgmt_ip, config, refresh=False) | Performs authentication and retries cookie. | 2.716905 | 2.701997 | 1.005517 |
try:
# The following determines if the switch interfaces are
# in place. If so, make sure they have a basic trunk
# configuration applied to none.
switch_ifs = self._mdriver._get_switch_interfaces(
switch_ip, cfg_only=(False if replay else True))
if not switch_ifs:
LOG.debug("Skipping switch %s which has no configured "
"interfaces",
switch_ip)
return
self._driver.initialize_all_switch_interfaces(
switch_ifs, switch_ip)
except Exception:
with excutils.save_and_reraise_exception():
LOG.warning("Unable to initialize interfaces to "
"switch %(switch_ip)s",
{'switch_ip': switch_ip})
self._mdriver.register_switch_as_inactive(switch_ip,
'replay init_interface')
if self._mdriver.is_replay_enabled():
return | def _initialize_trunk_interfaces_to_none(self, switch_ip, replay=True) | Initialize all nexus interfaces to trunk allowed none. | 5.363948 | 5.536074 | 0.968908 |
LOG.debug("Replaying config for switch ip %(switch_ip)s",
{'switch_ip': switch_ip})
# Before replaying all config, initialize trunk interfaces
# to none as required. If this fails, the switch may not
# be up all the way. Quit and retry later.
try:
self._initialize_trunk_interfaces_to_none(switch_ip)
except Exception:
return
nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
# If configured to set global VXLAN values and
# there exists VXLAN data base entries, then configure
# the "interface nve" entry on the switch.
if (len(nve_bindings) > 0 and
cfg.CONF.ml2_cisco.vxlan_global_config):
LOG.debug("Nexus: Replay NVE Interface")
loopback = self._mdriver.get_nve_loopback(switch_ip)
self._driver.enable_vxlan_feature(switch_ip,
const.NVE_INT_NUM, loopback)
for x in nve_bindings:
try:
self._driver.create_nve_member(switch_ip,
const.NVE_INT_NUM, x.vni, x.mcast_group)
except Exception as e:
LOG.error("Failed to configure nve_member for "
"switch %(switch_ip)s, vni %(vni)s"
"Reason:%(reason)s ",
{'switch_ip': switch_ip, 'vni': x.vni,
'reason': e})
self._mdriver.register_switch_as_inactive(switch_ip,
'replay create_nve_member')
return
try:
port_bindings = nxos_db.get_nexusport_switch_bindings(switch_ip)
except excep.NexusPortBindingNotFound:
LOG.warning("No port entries found for switch ip "
"%(switch_ip)s during replay.",
{'switch_ip': switch_ip})
return
try:
self._mdriver.configure_switch_entries(
switch_ip, port_bindings)
except Exception as e:
LOG.error("Unexpected exception while replaying "
"entries for switch %(switch_ip)s, Reason:%(reason)s ",
{'switch_ip': switch_ip, 'reason': e})
self._mdriver.register_switch_as_inactive(switch_ip,
'replay switch_entries') | def replay_config(self, switch_ip) | Sends pending config data in OpenStack to Nexus. | 3.707212 | 3.68631 | 1.00567 |
switch_connections = self._mdriver.get_all_switch_ips()
for switch_ip in switch_connections:
state = self._mdriver.get_switch_ip_and_active_state(switch_ip)
config_failure = self._mdriver.get_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
contact_failure = self._mdriver.get_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
LOG.debug("check_connections() thread %(thid)d, switch "
"%(switch_ip)s state %(state)s "
"contact_failure %(contact_failure)d "
"config_failure %(config_failure)d ",
{'thid': threading.current_thread().ident,
'switch_ip': switch_ip, 'state': state,
'contact_failure': contact_failure,
'config_failure': config_failure})
try:
# Send a simple get nexus type to determine if
# the switch is up
nexus_type = self._driver.get_nexus_type(switch_ip)
except Exception:
if state != const.SWITCH_INACTIVE:
LOG.error("Lost connection to switch ip "
"%(switch_ip)s", {'switch_ip': switch_ip})
self._mdriver.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_INACTIVE)
else:
self._mdriver.incr_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
else:
if state == const.SWITCH_RESTORE_S2:
try:
self._mdriver.configure_next_batch_of_vlans(switch_ip)
except Exception as e:
LOG.error("Unexpected exception while replaying "
"entries for switch %(switch_ip)s, "
"Reason:%(reason)s ",
{'switch_ip': switch_ip, 'reason': e})
self._mdriver.register_switch_as_inactive(
switch_ip, 'replay next_vlan_batch')
continue
if state == const.SWITCH_INACTIVE:
self._configure_nexus_type(switch_ip, nexus_type)
LOG.info("Re-established connection to switch "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
self._mdriver.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_RESTORE_S1)
self.replay_config(switch_ip)
# If replay failed, it stops trying to configure db entries
# and sets switch state to inactive so this caller knows
# it failed. If it did fail, we increment the
# retry counter else reset it to 0.
if self._mdriver.get_switch_ip_and_active_state(
switch_ip) == const.SWITCH_INACTIVE:
self._mdriver.incr_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
LOG.warning("Replay config failed for "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
else:
self._mdriver.reset_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
self._mdriver.reset_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
LOG.info("Replay config successful for "
"ip %(switch_ip)s",
{'switch_ip': switch_ip}) | def check_connections(self) | Check connection between OpenStack to Nexus device. | 2.815733 | 2.766844 | 1.01767 |
try:
loaded_class = runtime_utils.load_class_by_alias_or_classname(
'networking_cisco.ml2.nexus_driver', 'restapi')
return loaded_class(CONF.ml2_cisco.nexus_switches)
except ImportError:
LOG.error("Error loading Nexus Config driver 'restapi'")
raise SystemExit(1) | def _load_nexus_cfg_driver(self) | Load Nexus Config driver.
:raises SystemExit of 1 if driver cannot be loaded | 6.131228 | 5.167556 | 1.186485 |
switch = cfg.CONF.ml2_cisco.nexus_switches.get(switch_ip)
if switch and switch.username and switch.password:
return True
else:
return False | def _switch_defined(self, switch_ip) | Verify this ip address is defined (for Nexus). | 3.958572 | 3.360173 | 1.178086 |
vlan_range = self._get_switch_vlan_range(switch_ip)
sized_range = ''
fr = 0
to = 0
# if vlan_range not empty and haven't met requested size
while size > 0 and vlan_range:
vlan_id, vni = vlan_range.pop(0)
size -= 1
if fr == 0 and to == 0:
fr = vlan_id
to = vlan_id
else:
diff = vlan_id - to
if diff == 1:
to = vlan_id
else:
if fr == to:
sized_range += str(to) + ','
else:
sized_range += str(fr) + '-'
sized_range += str(to) + ','
fr = vlan_id
to = vlan_id
if fr != 0:
if fr == to:
sized_range += str(to)
else:
sized_range += str(fr) + '-'
sized_range += str(to)
self._save_switch_vlan_range(switch_ip, vlan_range)
return sized_range | def _pop_vlan_range(self, switch_ip, size) | Extract a specific number of vlans from storage.
Purpose: Can only send a limited number of vlans
to Nexus at a time.
Sample Use Cases:
1) vlan_range is a list of vlans. If there is a
list 1000, 1001, 1002, thru 2000 and size is 6,
then the result is '1000-1005' and 1006 thru 2000
is pushed back into storage.
2) if the list is 1000, 1003, 1004, 1006 thru 2000
and size is 6, then the result is
'1000, 1003-1004, 1006-1008' and 1009 thru 2000
is pushed back into storage for next time. | 2.39132 | 2.502626 | 0.955524 |
switch_connections = []
try:
bindings = nxos_db.get_reserved_switch_binding()
except excep.NexusPortBindingNotFound:
LOG.error("No switch bindings in the port data base")
bindings = []
for switch in bindings:
switch_connections.append(switch.switch_ip)
return switch_connections | def get_all_switch_ips(self) | Using reserved switch binding get all switch ips. | 5.938074 | 4.922875 | 1.206221 |
try:
switch_info = link_info['switch_info']
if not isinstance(switch_info, dict):
switch_info = jsonutils.loads(switch_info)
except Exception as e:
LOG.error("switch_info can't be decoded: %(exp)s",
{"exp": e})
switch_info = {}
return switch_info | def _get_baremetal_switch_info(self, link_info) | Get switch_info dictionary from context. | 2.823255 | 2.489609 | 1.134015 |
port = context.current
if self.trunk.is_trunk_subport_baremetal(port):
return self._baremetal_set_binding(context)
if not nexus_help.is_baremetal(port):
return False
if bc.portbindings.PROFILE not in port:
return False
profile = port[bc.portbindings.PROFILE]
if 'local_link_information' not in profile:
return False
all_link_info = profile['local_link_information']
selected = False
for link_info in all_link_info:
if 'port_id' not in link_info:
return False
switch_info = self._get_baremetal_switch_info(
link_info)
if 'switch_ip' in switch_info:
switch_ip = switch_info['switch_ip']
else:
return False
if self._switch_defined(switch_ip):
selected = True
else:
LOG.warning("Skip switch %s. Not configured "
"in ini file" % switch_ip)
if not selected:
return False
selected = self._baremetal_set_binding(context, all_link_info)
if selected:
self._init_baremetal_trunk_interfaces(
context.current, context.top_bound_segment)
if self.trunk.is_trunk_parentport(port):
self.trunk.update_subports(port)
return selected | def _supported_baremetal_transaction(self, context) | Verify transaction is complete and for us. | 3.788552 | 3.767745 | 1.005522 |
all_switches = set()
active_switches = set()
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
switch_info = self._get_baremetal_switch_info(link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
# If not for Nexus
if not self._switch_defined(switch_ip):
continue
all_switches.add(switch_ip)
if self.is_switch_active(switch_ip):
active_switches.add(switch_ip)
return list(all_switches), list(active_switches) | def _get_baremetal_switches(self, port) | Get switch ip addresses from baremetal transaction.
This method is used to extract switch information
from the transaction where VNIC_TYPE is baremetal.
:param port: Received port transaction
:returns: list of all switches
:returns: list of only switches which are active | 2.982368 | 2.849273 | 1.046712 |
connections = []
is_native = False if self.trunk.is_trunk_subport(port) else True
all_link_info = port[bc.portbindings.PROFILE]['local_link_information']
for link_info in all_link_info:
# Extract port info
intf_type, port = nexus_help.split_interface_name(
link_info['port_id'])
# Determine if this switch is to be skipped
switch_info = self._get_baremetal_switch_info(
link_info)
if not switch_info:
continue
switch_ip = switch_info['switch_ip']
# If not for Nexus
if not self._switch_defined(switch_ip):
continue
# Requested connections for only active switches
if (only_active_switch and
not self.is_switch_active(switch_ip)):
continue
ch_grp = 0
if not from_segment:
try:
reserved = nxos_db.get_switch_if_host_mappings(
switch_ip,
nexus_help.format_interface_name(
intf_type, port))
if reserved[0].ch_grp > 0:
ch_grp = reserved[0].ch_grp
intf_type, port = nexus_help.split_interface_name(
'', ch_grp)
except excep.NexusHostMappingNotFound:
pass
connections.append((switch_ip, intf_type, port,
is_native, ch_grp))
return connections | def _get_baremetal_connections(self, port,
only_active_switch=False,
from_segment=False) | Get switch ips and interfaces from baremetal transaction.
This method is used to extract switch/interface
information from transactions where VNIC_TYPE is
baremetal.
:param port: Received port transaction
:param only_active_switch: Indicator for selecting
connections with switches that are active
:param from_segment: only return interfaces from the
segment/transaction as opposed to
say port channels which are learned.
:Returns: list of switch_ip, intf_type, port_id, is_native | 4.295836 | 4.048074 | 1.061205 |
# interfaces list requiring switch initialization and
# reserved port and port_binding db entry creation
list_to_init = []
# interfaces list requiring reserved port and port_binding
# db entry creation
inactive_switch = []
connections = self._get_baremetal_connections(
port_seg, False, True)
for switch_ip, intf_type, port, is_native, _ in connections:
try:
nxos_db.get_switch_if_host_mappings(
switch_ip,
nexus_help.format_interface_name(intf_type, port))
except excep.NexusHostMappingNotFound:
if self.is_switch_active(switch_ip):
# channel-group added later
list_to_init.append(
(switch_ip, intf_type, port, is_native, 0))
else:
inactive_switch.append(
(switch_ip, intf_type, port, is_native, 0))
# channel_group is appended to tuples in list_to_init
self.driver.initialize_baremetal_switch_interfaces(list_to_init)
host_id = port_seg.get('dns_name')
if host_id is None:
host_id = const.RESERVED_PORT_HOST_ID
# Add inactive list to list_to_init to create RESERVED
# port data base entries
list_to_init += inactive_switch
for switch_ip, intf_type, port, is_native, ch_grp in list_to_init:
nxos_db.add_host_mapping(
host_id,
switch_ip,
nexus_help.format_interface_name(intf_type, port),
ch_grp, False) | def _init_baremetal_trunk_interfaces(self, port_seg, segment) | Initialize baremetal switch interfaces and DB entry.
With baremetal transactions, the interfaces are not
known during initialization so they must be initialized
when the transactions are received.
* Reserved switch entries are added if needed.
* Reserved port entries are added.
* Determine if port channel is configured on the
interface and store it so we know to create a port-channel
binding instead of that defined in the transaction.
In this case, the RESERVED binding is the ethernet interface
with port-channel stored in channel-group field.
When this channel-group is not 0, we know to create a port binding
as a port-channel instead of interface ethernet. | 4.439788 | 4.171437 | 1.064331 |
all_switches = set()
active_switches = set()
try:
host_list = nxos_db.get_host_mappings(host_id)
for mapping in host_list:
all_switches.add(mapping.switch_ip)
if self.is_switch_active(mapping.switch_ip):
active_switches.add(mapping.switch_ip)
except excep.NexusHostMappingNotFound:
pass
return list(all_switches), list(active_switches) | def _get_host_switches(self, host_id) | Get switch IPs from configured host mapping.
This method is used to extract switch information
from transactions where VNIC_TYPE is normal.
Information is extracted from ini file which
is stored in _nexus_switches.
:param host_id: host_name from transaction
:returns: list of all switches
:returns: list of only switches which are active | 2.730201 | 2.463258 | 1.10837 |
host_found = False
host_connections = []
try:
host_ifs = nxos_db.get_host_mappings(host_id)
except excep.NexusHostMappingNotFound:
host_ifs = []
for ifs in host_ifs:
host_found = True
if (only_active_switch and
not self.is_switch_active(ifs.switch_ip)):
continue
intf_type, port = nexus_help.split_interface_name(
ifs.if_id, ifs.ch_grp)
# is_native set to const.NOT_NATIVE for
# VNIC_TYPE of normal
host_connections.append((
ifs.switch_ip, intf_type, port,
const.NOT_NATIVE, ifs.ch_grp))
if not host_found:
LOG.warning(HOST_NOT_FOUND, host_id)
return host_connections | def _get_host_connections(self, host_id,
only_active_switch=False) | Get switch IPs and interfaces from config host mapping.
This method is used to extract switch/interface
information from ini files when VNIC_TYPE is
normal. The ini files contain host to interface
mappings.
:param host_id: Host name from transaction
:param only_active_switch: Indicator for selecting only
connections for switches that are active
:returns: list of switch_ip, intf_type, port_id, is_native | 4.592664 | 3.945182 | 1.16412 |
switch_ifs = []
try:
port_info = nxos_db.get_switch_host_mappings(
requested_switch_ip)
except excep.NexusHostMappingNotFound:
port_info = []
for binding in port_info:
if cfg_only and not binding.is_static:
continue
intf_type, port = nexus_help.split_interface_name(
binding.if_id)
switch_ifs.append(
(requested_switch_ip, intf_type, port,
const.NOT_NATIVE, binding.ch_grp))
return switch_ifs | def _get_switch_interfaces(self, requested_switch_ip, cfg_only=False) | Get switch interfaces from host mapping DB.
For a given switch, this returns all known port
interfaces for a given switch. These have been
learned from received baremetal transactions and
from configuration file.
:param requested_switch_ip: switch_ip
:returns: list of switch_ip, intf_type, port_id, is_native | 5.785502 | 5.139845 | 1.125618 |
host_nve_connections = self._get_switch_nve_info(host_id)
for switch_ip in host_nve_connections:
if not nxos_db.get_nve_vni_member_bindings(vni, switch_ip,
device_id):
nxos_db.add_nexusnve_binding(vni, switch_ip, device_id,
mcast_group) | def _configure_nve_db(self, vni, device_id, mcast_group, host_id) | Create the nexus NVE database entry.
Called during update precommit port event. | 3.867462 | 3.586402 | 1.078368 |
host_nve_connections = self._get_switch_nve_info(host_id)
for switch_ip in host_nve_connections:
# If configured to set global VXLAN values then
# If this is the first database entry for this switch_ip
# then configure the "interface nve" entry on the switch.
if cfg.CONF.ml2_cisco.vxlan_global_config:
nve_bindings = nxos_db.get_nve_switch_bindings(switch_ip)
if len(nve_bindings) == 1:
LOG.debug("Nexus: create NVE interface")
loopback = self.get_nve_loopback(switch_ip)
self.driver.enable_vxlan_feature(switch_ip,
const.NVE_INT_NUM, loopback)
# If this is the first database entry for this (VNI, switch_ip)
# then configure the "member vni #" entry on the switch.
member_bindings = nxos_db.get_nve_vni_switch_bindings(vni,
switch_ip)
if len(member_bindings) == 1:
LOG.debug("Nexus: add member")
self.driver.create_nve_member(switch_ip, const.NVE_INT_NUM,
vni, mcast_group) | def _configure_nve_member(self, vni, device_id, mcast_group, host_id) | Add "member vni" configuration to the NVE interface.
Called during update postcommit port event. | 4.053643 | 3.946818 | 1.027066 |
rows = nxos_db.get_nve_vni_deviceid_bindings(vni, device_id)
for row in rows:
nxos_db.remove_nexusnve_binding(vni, row.switch_ip, device_id) | def _delete_nve_db(self, vni, device_id, mcast_group, host_id) | Delete the nexus NVE database entry.
Called during delete precommit port event. | 4.770232 | 4.387255 | 1.087293 |
host_nve_connections = self._get_switch_nve_info(host_id)
for switch_ip in host_nve_connections:
if not nxos_db.get_nve_vni_switch_bindings(vni, switch_ip):
self.driver.delete_nve_member(switch_ip,
const.NVE_INT_NUM, vni)
if (cfg.CONF.ml2_cisco.vxlan_global_config and
not nxos_db.get_nve_switch_bindings(switch_ip)):
self.driver.disable_vxlan_feature(switch_ip) | def _delete_nve_member(self, vni, device_id, mcast_group, host_id) | Remove "member vni" configuration from the NVE interface.
Called during delete postcommit port event. | 4.35369 | 4.28225 | 1.016683 |
connections = self._get_port_connections(port, host_id)
for switch_ip, intf_type, nexus_port, is_native, ch_grp in connections:
port_id = nexus_help.format_interface_name(
intf_type, nexus_port, ch_grp)
try:
nxos_db.get_nexusport_binding(port_id, vlan_id, switch_ip,
device_id)
except excep.NexusPortBindingNotFound:
nxos_db.add_nexusport_binding(port_id, str(vlan_id), str(vni),
switch_ip, device_id,
is_native) | def _configure_nxos_db(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan) | Create the nexus database entry.
Called during update precommit port event. | 3.819573 | 3.590478 | 1.063806 |
if is_provider_vlan:
auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create
auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk
else:
auto_create = True
auto_trunk = True
return auto_create, auto_trunk | def _gather_config_parms(self, is_provider_vlan, vlan_id) | Collect auto_create, auto_trunk from config. | 2.418327 | 1.829534 | 1.321827 |
# This implies VLAN, VNI, and Port are all duplicate.
# Then there is nothing to configure in Nexus.
if duplicate_type == const.DUPLICATE_PORT:
return
auto_create, auto_trunk = self._gather_config_parms(
is_provider_vlan, vlan_id)
# if type DUPLICATE_VLAN, don't create vlan
if duplicate_type == const.DUPLICATE_VLAN:
auto_create = False
if auto_create and auto_trunk:
LOG.debug("Nexus: create vlan %s and add to interface", vlan_id)
self.driver.create_and_trunk_vlan(
switch_ip, vlan_id, intf_type,
nexus_port, vni, is_native)
elif auto_create:
LOG.debug("Nexus: create vlan %s", vlan_id)
self.driver.create_vlan(switch_ip, vlan_id, vni)
elif auto_trunk:
LOG.debug("Nexus: trunk vlan %s", vlan_id)
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, vlan_id,
intf_type, nexus_port, is_native) | def _configure_port_binding(self, is_provider_vlan, duplicate_type,
is_native,
switch_ip, vlan_id,
intf_type, nexus_port, vni) | Conditionally calls vlan and port Nexus drivers. | 3.090689 | 3.035302 | 1.018247 |
if not pvlan_ids:
return []
pvlan_list = list(pvlan_ids)
pvlan_list.sort()
compressed_list = []
begin = -1
prev_vlan = -1
for port_vlan in pvlan_list:
if prev_vlan == -1:
prev_vlan = port_vlan
else:
if (port_vlan - prev_vlan) == 1:
if begin == -1:
begin = prev_vlan
prev_vlan = port_vlan
else:
if begin == -1:
compressed_list.append(str(prev_vlan))
else:
compressed_list.append("%d-%d" % (begin, prev_vlan))
begin = -1
prev_vlan = port_vlan
if begin == -1:
compressed_list.append(str(prev_vlan))
else:
compressed_list.append("%s-%s" % (begin, prev_vlan))
return compressed_list | def _get_compressed_vlan_list(self, pvlan_ids) | Generate a compressed vlan list ready for XML using a vlan set.
Sample Use Case:
Input vlan set:
--------------
1 - s = set([11, 50, 25, 30, 15, 16, 3, 8, 2, 1])
2 - s = set([87, 11, 50, 25, 30, 15, 16, 3, 8, 2, 1, 88])
Returned compressed XML list:
----------------------------
1 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30', '50']
2 - compressed_list = ['1-3', '8', '11', '15-16', '25', '30',
'50', '87-88'] | 1.801401 | 1.839257 | 0.979417 |
intf_type, nexus_port = nexus_help.split_interface_name(port)
# If native_vlan is configured, this is isolated since
# two configs (native + trunk) must be sent for this vlan only.
if native_vlan != 0:
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, native_vlan,
intf_type, nexus_port, True)
# If this is the only vlan
if len(pvlan_ids) == 1:
return
concat_vlans = ''
compressed_vlans = self._get_compressed_vlan_list(pvlan_ids)
for pvlan in compressed_vlans:
if concat_vlans == '':
concat_vlans = "%s" % pvlan
else:
concat_vlans += ",%s" % pvlan
# if string starts getting a bit long, send it.
if len(concat_vlans) >= const.CREATE_PORT_VLAN_LENGTH:
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, concat_vlans,
intf_type, nexus_port, False)
concat_vlans = ''
# Send remaining vlans if any
if len(concat_vlans):
self.driver.send_enable_vlan_on_trunk_int(
switch_ip, concat_vlans,
intf_type, nexus_port, False) | def _restore_port_binding(self,
switch_ip, pvlan_ids,
port, native_vlan) | Restores a set of vlans for a given port. | 3.641649 | 3.61255 | 1.008055 |
count = 1
conf_str = ''
vnsegment_sent = 0
path_str, conf_str = self.driver.start_create_vlan()
# At this time, this will only configure vni information when needed
while vnsegment_sent < const.CREATE_VLAN_BATCH and vlans:
vlan_id, vni = vlans.pop(0)
# Add it to the batch
conf_str = self.driver.get_create_vlan(
switch_ip, vlan_id, vni, conf_str)
# batch size has been met
if (count == const.CREATE_VLAN_SEND_SIZE):
conf_str = self.driver.end_create_vlan(conf_str)
self.driver.send_edit_string(switch_ip, path_str, conf_str)
vnsegment_sent += count
conf_str = ''
count = 1
else:
count += 1
# batch size was not met
if conf_str:
vnsegment_sent += count
conf_str = self.driver.end_create_vlan(conf_str)
self.driver.send_edit_string(switch_ip, path_str, conf_str)
conf_str = ''
LOG.debug("Switch %s VLAN vn-segment replay summary: %d",
switch_ip, vnsegment_sent) | def _restore_vxlan_entries(self, switch_ip, vlans) | Restore vxlan entries on a Nexus switch. | 3.981177 | 3.977637 | 1.00089 |
connections = self._get_active_port_connections(port, host_id)
# (nexus_port,switch_ip) will be unique in each iteration.
# But switch_ip will repeat if host has >1 connection to same switch.
# So track which switch_ips already have vlan created in this loop.
vlan_already_created = []
starttime = time.time()
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
try:
all_bindings = nxos_db.get_nexusvlan_binding(
vlan_id, switch_ip)
except excep.NexusPortBindingNotFound:
LOG.warning("Switch %(switch_ip)s and Vlan "
"%(vlan_id)s not found in port binding "
"database. Skipping this update",
{'switch_ip': switch_ip, 'vlan_id': vlan_id})
continue
previous_bindings = [row for row in all_bindings
if row.instance_id != device_id]
if previous_bindings and (switch_ip in vlan_already_created):
duplicate_type = const.DUPLICATE_VLAN
else:
vlan_already_created.append(switch_ip)
duplicate_type = const.NO_DUPLICATE
port_starttime = time.time()
try:
self._configure_port_binding(
is_provider_vlan, duplicate_type,
is_native,
switch_ip, vlan_id,
intf_type, nexus_port,
vni)
except Exception:
with excutils.save_and_reraise_exception():
self.driver.capture_and_print_timeshot(
port_starttime, "port_configerr",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "configerr",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
port_starttime, "port_config",
switch=switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "config") | def _configure_port_entries(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan) | Create a nexus switch entry.
if needed, create a VLAN in the appropriate switch or port and
configure the appropriate interfaces for this VLAN.
Called during update postcommit port event. | 3.690048 | 3.684726 | 1.001444 |
next_range = self._pop_vlan_range(
switch_ip, const.CREATE_VLAN_BATCH)
if next_range:
try:
self.driver.set_all_vlan_states(
switch_ip, next_range)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error encountered restoring vlans "
"for switch %(switch_ip)s",
{'switch_ip': switch_ip})
self._save_switch_vlan_range(switch_ip, [])
vxlan_range = self._get_switch_vxlan_range(switch_ip)
if vxlan_range:
try:
self._restore_vxlan_entries(switch_ip, vxlan_range)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error encountered restoring vxlans "
"for switch %(switch_ip)s",
{'switch_ip': switch_ip})
self._save_switch_vxlan_range(switch_ip, [])
# if no more vlans to restore, we're done. go active.
if (not self._get_switch_vlan_range(switch_ip) and
not self._get_switch_vxlan_range(switch_ip)):
self.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_ACTIVE)
LOG.info("Restore of Nexus switch "
"ip %(switch_ip)s is complete",
{'switch_ip': switch_ip})
else:
LOG.debug(("Restored batch of VLANS on "
"Nexus switch ip %(switch_ip)s"),
{'switch_ip': switch_ip}) | def configure_next_batch_of_vlans(self, switch_ip) | Get next batch of vlans and send them to Nexus. | 2.743973 | 2.705332 | 1.014283 |
prev_vlan = -1
prev_vni = -1
prev_port = None
prev_native_vlan = 0
starttime = time.time()
port_bindings.sort(key=lambda x: (x.port_id, x.vlan_id, x.vni))
self.driver.capture_and_print_timeshot(
starttime, "replay_t2_aft_sort",
switch=switch_ip)
# Let's make these lists a set to exclude duplicates
vlans = set()
pvlans = set()
interface_count = 0
duplicate_port = 0
vlan_count = 0
for port in port_bindings:
if nxos_db.is_reserved_binding(port):
continue
auto_create, auto_trunk = self._gather_config_parms(
nxos_db.is_provider_vlan(port.vlan_id), port.vlan_id)
if port.port_id == prev_port:
if port.vlan_id == prev_vlan and port.vni == prev_vni:
# Same port/Same Vlan - skip duplicate
duplicate_port += 1
continue
else:
# Same port/different Vlan - track it
vlan_count += 1
if auto_create:
vlans.add((port.vlan_id, port.vni))
if auto_trunk:
pvlans.add(port.vlan_id)
if port.is_native:
prev_native_vlan = port.vlan_id
else:
# Different port - write out interface trunk on previous port
if prev_port:
interface_count += 1
LOG.debug("Switch %s port %s replay summary: unique vlan "
"count %d, duplicate port entries %d",
switch_ip, prev_port, vlan_count, duplicate_port)
duplicate_port = 0
vlan_count = 0
if pvlans:
self._restore_port_binding(
switch_ip, pvlans, prev_port, prev_native_vlan)
pvlans.clear()
prev_native_vlan = 0
# Start tracking new port
if auto_create:
vlans.add((port.vlan_id, port.vni))
if auto_trunk:
pvlans.add(port.vlan_id)
prev_port = port.port_id
if port.is_native:
prev_native_vlan = port.vlan_id
if pvlans:
LOG.debug("Switch %s port %s replay summary: unique vlan "
"count %d, duplicate port entries %d",
switch_ip, port.port_id, vlan_count, duplicate_port)
self._restore_port_binding(
switch_ip, pvlans, prev_port, prev_native_vlan)
LOG.debug("Replayed total %d ports for Switch %s",
interface_count + 1, switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "replay_part_1",
switch=switch_ip)
vlans = list(vlans)
if vlans:
vlans.sort()
vlan, vni = vlans[0]
if vni == 0:
self._save_switch_vlan_range(switch_ip, vlans)
else:
self._save_switch_vxlan_range(switch_ip, vlans)
self.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_RESTORE_S2)
self.configure_next_batch_of_vlans(switch_ip)
self.driver.capture_and_print_timeshot(
starttime, "replay_part_2",
switch=switch_ip) | def configure_switch_entries(self, switch_ip, port_bindings) | Create a nexus switch entry in Nexus.
The port_bindings is sorted by vlan_id, vni, port_id.
When there is a change in vlan_id or vni, then vlan
data is configured in Nexus device.
Otherwise we check if there is a change in port_id
where we configure the port with vlan trunk config.
Called during switch replay event. | 3.022641 | 2.959743 | 1.021251 |
try:
rows = nxos_db.get_nexusvm_bindings(vlan_id, device_id)
for row in rows:
nxos_db.remove_nexusport_binding(row.port_id, row.vlan_id,
row.vni, row.switch_ip, row.instance_id)
except excep.NexusPortBindingNotFound:
return | def _delete_nxos_db(self, unused, vlan_id, device_id, host_id, vni,
is_provider_vlan) | Delete the nexus database entry.
Called during delete precommit port event. | 3.837581 | 3.73408 | 1.027718 |
'''This determines if port channel id needs to be freed.'''
# if this connection is not a port-channel, nothing to do.
if intf_type != 'port-channel':
return
# Check if this driver created it and its no longer needed.
try:
vpc = nxos_db.get_switch_vpc_alloc(
switch_ip, nexus_port)
except excep.NexusVPCAllocNotFound:
# This can occur for non-baremetal configured
# port-channels. Nothing more to do.
LOG.debug("Switch %s portchannel %s vpc entry not "
"found in vpcid alloc table.",
switch_ip, nexus_port)
return
# if this isn't one which was allocated or learned,
# don't do any further processing.
if not vpc.active:
LOG.debug("Switch %s portchannel %s vpc entry not "
"active.",
switch_ip, nexus_port)
return
# Is this port-channel still in use?
# If so, nothing more to do.
try:
nxos_db.get_nexus_switchport_binding(port_id, switch_ip)
LOG.debug("Switch %s portchannel %s port entries "
"in use. Skipping port-channel clean-up.",
switch_ip, nexus_port)
return
except excep.NexusPortBindingNotFound:
pass
# need to get ethernet interface name
try:
mapping = nxos_db.get_switch_and_host_mappings(
host_id, switch_ip)
eth_type, eth_port = nexus_help.split_interface_name(
mapping[0].if_id)
except excep.NexusHostMappingNotFound:
LOG.warning("Switch %s hostid %s host_mapping not "
"found. Skipping port-channel clean-up.",
switch_ip, host_id)
return
# Remove the channel group from ethernet interface
# and remove port channel from this switch.
if not vpc.learned:
self.driver.delete_ch_grp_to_interface(
switch_ip, eth_type, eth_port,
nexus_port)
self.driver.delete_port_channel(switch_ip,
nexus_port)
try:
nxos_db.free_vpcid_for_switch(nexus_port, switch_ip)
LOG.info("Released portchannel %s resources for "
"switch %s",
nexus_port, switch_ip)
except excep.NexusVPCAllocNotFound:
# Not all learned port channels will be in this db when
# they're outside the configured vpc_pool so
# this exception may be possible.
LOG.warning("Failed to free vpcid %s for switch %s "
"since it did not exist in table.",
nexus_port, switch_ip) | def _delete_port_channel_resources(self, host_id, switch_ip,
intf_type, nexus_port, port_id) | This determines if port channel id needs to be freed. | 4.190405 | 3.962011 | 1.057646 |
connections = self._get_active_port_connections(port, host_id)
# (nexus_port,switch_ip) will be unique in each iteration.
# But switch_ip will repeat if host has >1 connection to same switch.
# So track which switch_ips already have vlan removed in this loop.
vlan_already_removed = []
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
# if there are no remaining db entries using this vlan on this
# nexus switch port then remove vlan from the switchport trunk.
port_id = nexus_help.format_interface_name(intf_type, nexus_port)
auto_create = True
auto_trunk = True
if is_provider_vlan:
auto_create = cfg.CONF.ml2_cisco.provider_vlan_auto_create
auto_trunk = cfg.CONF.ml2_cisco.provider_vlan_auto_trunk
try:
nxos_db.get_port_vlan_switch_binding(port_id, vlan_id,
switch_ip)
except excep.NexusPortBindingNotFound:
pass
else:
continue
if auto_trunk:
self.driver.disable_vlan_on_trunk_int(
switch_ip, vlan_id, intf_type, nexus_port,
is_native)
# if there are no remaining db entries using this vlan on this
# nexus switch then remove the vlan.
if auto_create:
try:
nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
except excep.NexusPortBindingNotFound:
# Do not perform a second time on same switch
if switch_ip not in vlan_already_removed:
self.driver.delete_vlan(switch_ip, vlan_id)
vlan_already_removed.append(switch_ip)
self._delete_port_channel_resources(
host_id, switch_ip, intf_type, nexus_port, port_id)
if nexus_help.is_baremetal(port):
connections = self._get_baremetal_connections(
port, False, True)
for switch_ip, intf_type, nexus_port, is_native, _ in connections:
if_id = nexus_help.format_interface_name(
intf_type, nexus_port)
try:
mapping = nxos_db.get_switch_if_host_mappings(
switch_ip, if_id)
ch_grp = mapping[0].ch_grp
except excep.NexusHostMappingNotFound:
ch_grp = 0
bind_port_id = nexus_help.format_interface_name(
intf_type, nexus_port, ch_grp)
binding = nxos_db.get_port_switch_bindings(
bind_port_id,
switch_ip)
if not binding:
nxos_db.remove_host_mapping(if_id, switch_ip) | def _delete_switch_entry(self, port, vlan_id, device_id, host_id, vni,
is_provider_vlan) | Delete the nexus switch entry.
By accessing the current db entries determine if switch
configuration can be removed.
Called during delete postcommit port event. | 3.356126 | 3.315995 | 1.012102 |
# Verify segment.
if not self._is_valid_segment(segment):
return
device_id = self._get_port_uuid(port)
if nexus_help.is_baremetal(port):
host_id = port.get('dns_name')
else:
host_id = port.get(bc.portbindings.HOST_ID)
vlan_id = segment.get(api.SEGMENTATION_ID)
is_provider = nxos_db.is_provider_vlan(vlan_id)
settings = {"vlan_id": vlan_id,
"device_id": device_id,
"host_id": host_id}
missing_fields = [field for field, value in settings.items()
if (field != 'host_id' and not value)]
if not missing_fields:
func(port, vlan_id, device_id, host_id, vni, is_provider)
else:
raise excep.NexusMissingRequiredFields(
fields=' '.join(missing_fields)) | def _port_action_vlan(self, port, segment, func, vni) | Verify configuration and then process event. | 3.584534 | 3.530159 | 1.015403 |
# If the segment is None, just log a warning message and return.
if segment is None:
self._log_missing_segment()
return
device_id = port.get('device_id')
mcast_group = segment.get(api.PHYSICAL_NETWORK)
host_id = port.get(bc.portbindings.HOST_ID)
vni = segment.get(api.SEGMENTATION_ID)
if vni and device_id and mcast_group and host_id:
func(vni, device_id, mcast_group, host_id)
return vni
else:
fields = "vni " if not vni else ""
fields += "device_id " if not device_id else ""
fields += "mcast_group " if not mcast_group else ""
fields += "host_id" if not host_id else ""
raise excep.NexusMissingRequiredFields(fields=fields) | def _port_action_vxlan(self, port, segment, func) | Verify configuration and then process event. | 2.923133 | 2.909736 | 1.004604 |
# No new events are handled until replay
# thread has put the switch in active state.
# If a switch is in active state, verify
# the switch is still in active state
# before accepting this new event.
#
# If create_port_postcommit fails, it causes
# other openstack dbs to be cleared and
# retries for new VMs will stop. Subnet
# transactions will continue to be retried.
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
# Verify segment.
if not self._is_valid_segment(vlan_segment):
return
port = context.current
if self._is_supported_deviceowner(port):
if nexus_help.is_baremetal(context.current):
all_switches, active_switches = (
self._get_baremetal_switches(context.current))
else:
host_id = context.current.get(bc.portbindings.HOST_ID)
all_switches, active_switches = (
self._get_host_switches(host_id))
# Verify switch is still up before replay
# thread checks.
verified_active_switches = []
for switch_ip in active_switches:
try:
self.driver.get_nexus_type(switch_ip)
verified_active_switches.append(switch_ip)
except Exception as e:
LOG.error("Failed to ping "
"switch ip %(switch_ip)s error %(exp_err)s",
{'switch_ip': switch_ip, 'exp_err': e})
LOG.debug("Create Stats: thread %(thid)d, "
"all_switches %(all)d, "
"active %(active)d, verified %(verify)d",
{'thid': threading.current_thread().ident,
'all': len(all_switches),
'active': len(active_switches),
'verify': len(verified_active_switches)})
# if host_id is valid and there is no active
# switches remaining
if all_switches and not verified_active_switches:
raise excep.NexusConnectFailed(
nexus_host=all_switches[0], config="None",
exc="Create Failed: Port event can not "
"be processed at this time.") | def create_port_postcommit(self, context) | Create port non-database commit event. | 4.81355 | 4.773852 | 1.008316 |
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment, context.bottom_bound_segment)
orig_vlan_segment, orig_vxlan_segment = self._get_segments(
context.original_top_bound_segment,
context.original_bottom_bound_segment)
if (self._is_vm_migrating(context, vlan_segment, orig_vlan_segment) or
self._is_status_down(context.current)):
vni = (self._port_action_vxlan(
context.original, orig_vxlan_segment, self._delete_nve_db)
if orig_vxlan_segment else 0)
self._port_action_vlan(context.original, orig_vlan_segment,
self._delete_nxos_db, vni)
elif self._is_supported_deviceowner(context.current):
vni = self._port_action_vxlan(context.current, vxlan_segment,
self._configure_nve_db) if vxlan_segment else 0
self._port_action_vlan(context.current, vlan_segment,
self._configure_nxos_db, vni) | def update_port_precommit(self, context) | Update port pre-database transaction commit event. | 2.876115 | 2.864385 | 1.004095 |
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment, context.bottom_bound_segment)
orig_vlan_segment, orig_vxlan_segment = self._get_segments(
context.original_top_bound_segment,
context.original_bottom_bound_segment)
if (self._is_vm_migrating(context, vlan_segment, orig_vlan_segment) or
self._is_status_down(context.current)):
vni = (self._port_action_vxlan(
context.original, orig_vxlan_segment,
self._delete_nve_member) if orig_vxlan_segment else 0)
self._port_action_vlan(context.original, orig_vlan_segment,
self._delete_switch_entry, vni)
elif self._is_supported_deviceowner(context.current):
if nexus_help.is_baremetal(context.current):
all_switches, active_switches = (
self._get_baremetal_switches(context.current))
else:
host_id = context.current.get(bc.portbindings.HOST_ID)
all_switches, active_switches = (
self._get_host_switches(host_id))
# if switches not active but host_id is valid
if not active_switches and all_switches:
raise excep.NexusConnectFailed(
nexus_host=all_switches[0], config="None",
exc="Update Port Failed: Nexus Switch "
"is down or replay in progress")
vni = self._port_action_vxlan(context.current, vxlan_segment,
self._configure_nve_member) if vxlan_segment else 0
self._port_action_vlan(context.current, vlan_segment,
self._configure_port_entries, vni) | def update_port_postcommit(self, context) | Update port non-database commit event. | 3.735491 | 3.716337 | 1.005154 |
if self._is_supported_deviceowner(context.current):
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
vni = self._port_action_vxlan(context.current, vxlan_segment,
self._delete_nve_db) if vxlan_segment else 0
self._port_action_vlan(context.current, vlan_segment,
self._delete_nxos_db, vni) | def delete_port_precommit(self, context) | Delete port pre-database commit event. | 4.578654 | 4.533062 | 1.010058 |
if self._is_supported_deviceowner(context.current):
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
vni = self._port_action_vxlan(context.current, vxlan_segment,
self._delete_nve_member) if vxlan_segment else 0
self._port_action_vlan(context.current, vlan_segment,
self._delete_switch_entry, vni) | def delete_port_postcommit(self, context) | Delete port non-database commit event. | 4.857898 | 4.776517 | 1.017038 |
ver_expr = "([0-9]+)\.([0-9]+)\((.*)\)"
re.compile(ver_expr)
v1 = re.match(ver_expr, self._cur_ver)
v2 = re.match(ver_expr, self._base_ver)
if int(v1.group(1)) > int(v2.group(1)):
self._is_iplus = True
elif int(v1.group(1)) == int(v2.group(1)):
if int(v1.group(2)) > int(v2.group(2)):
self._is_iplus = True
elif int(v1.group(2)) == int(v2.group(2)):
self._is_iplus = v1.group(3) >= v2.group(3)
LOG.info("DCNM version: %(cur_ver)s, iplus: %(is_iplus)s",
{'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus}) | def _detect_iplus(self) | Check the DCNM version and determine if it's for iplus | 2.179576 | 1.87223 | 1.16416 |
url = "%s/%s" % (self._segmentid_ranges_url, orchestrator_id)
res = self._send_request('GET', url, None, 'segment-id range')
if res and res.status_code in self._resp_ok:
return res.json() | def get_segmentid_range(self, orchestrator_id) | Get segment id range from DCNM. | 4.373398 | 4.118911 | 1.061785 |
url = self._segmentid_ranges_url
payload = {'orchestratorId': orchestrator_id,
'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
res = self._send_request('POST', url, payload, 'segment-id range')
if not (res and res.status_code in self._resp_ok):
LOG.error("Failed to set segment id range for orchestrator "
"%(orch)s on DCNM: %(text)s",
{'orch': orchestrator_id, 'text': res.text})
raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res)) | def set_segmentid_range(self, orchestrator_id, segid_min, segid_max) | set segment id range in DCNM. | 4.131579 | 3.868797 | 1.067924 |
try:
cfgplist = self.config_profile_list()
if self.default_cfg_profile not in cfgplist:
self.default_cfg_profile = ('defaultNetworkUniversalEfProfile'
if self._is_iplus else
'defaultNetworkIpv4EfProfile')
except dexc.DfaClientRequestFailed:
LOG.error("Failed to send request to DCNM.")
self.default_cfg_profile = 'defaultNetworkIpv4EfProfile' | def _set_default_cfg_profile(self) | Set default network config profile.
Check whether the default_cfg_profile value exist in the current
version of DCNM. If not, set it to new default value which is supported
by latest version. | 8.856244 | 6.727198 | 1.316483 |
url = self._create_network_url % (network_info['organizationName'],
network_info['partitionName'])
payload = network_info
LOG.info('url %(url)s payload %(payload)s',
{'url': url, 'payload': payload})
return self._send_request('POST', url, payload, 'network') | def _create_network(self, network_info) | Send create network request to DCNM.
:param network_info: network parameters to be created on DCNM | 4.36817 | 4.346809 | 1.004914 |
url = self._cfg_profile_get_url % (thisprofile)
payload = {}
res = self._send_request('GET', url, payload, 'config-profile')
if res and res.status_code in self._resp_ok:
return res.json() | def _config_profile_get(self, thisprofile) | Get information of a config profile from DCNM.
:param thisprofile: network config profile in request | 4.469301 | 4.775981 | 0.935787 |
url = self._cfg_profile_list_url
payload = {}
try:
res = self._send_request('GET', url, payload, 'config-profile')
if res and res.status_code in self._resp_ok:
return res.json()
except dexc.DfaClientRequestFailed:
LOG.error("Failed to send request to DCNM.") | def _config_profile_list(self) | Get list of supported config profile from DCNM. | 6.041853 | 4.772498 | 1.265973 |
url = self._global_settings_url
payload = {}
res = self._send_request('GET', url, payload, 'settings')
if res and res.status_code in self._resp_ok:
return res.json() | def _get_settings(self) | Get global mobility domain from DCNM. | 5.117796 | 4.198516 | 1.218954 |
url = self._org_url
payload = {
"organizationName": name,
"description": name if len(desc) == 0 else desc,
"orchestrationSource": orch_id}
return self._send_request('POST', url, payload, 'organization') | def _create_org(self, orch_id, name, desc) | Create organization on the DCNM.
:param orch_id: orchestrator ID
:param name: Name of organization
:param desc: Description of organization | 4.594086 | 5.48099 | 0.838185 |
if part_name is None:
part_name = self._part_name
if vrf_prof is None or dci_id == UNKNOWN_DCI_ID or (
service_node_ip == UNKNOWN_SRVN_NODE_IP):
part_info = self._get_partition(org_name, part_name)
if vrf_prof is None:
vrf_prof = self.get_partition_vrfProf(org_name, part_name,
part_info=part_info)
if dci_id == UNKNOWN_DCI_ID:
dci_id = self.get_partition_dciId(org_name, part_name,
part_info=part_info)
if service_node_ip == UNKNOWN_SRVN_NODE_IP:
service_node_ip = self.get_partition_serviceNodeIp(
org_name, part_name, part_info=part_info)
url = ((self._create_part_url % (org_name)) if operation == 'POST' else
self._update_part_url % (org_name, part_name))
payload = {
"partitionName": part_name,
"description": part_name if len(desc) == 0 else desc,
"serviceNodeIpAddress": service_node_ip,
"organizationName": org_name}
# Check the DCNM version and find out whether it is need to have
# extra payload for the new version when creating/updating a partition.
if self._is_iplus:
# Need to add extra payload for the new version.
enable_dci = "true" if dci_id and int(dci_id) != 0 else "false"
extra_payload = {
"vrfProfileName": vrf_prof,
"vrfName": ':'.join((org_name, part_name)),
"dciId": dci_id,
"enableDCIExtension": enable_dci}
payload.update(extra_payload)
return self._send_request(operation, url, payload, 'partition') | def _create_or_update_partition(self, org_name, part_name, desc,
dci_id=UNKNOWN_DCI_ID, vrf_prof=None,
service_node_ip=UNKNOWN_SRVN_NODE_IP,
operation='POST') | Send create or update partition request to the DCNM.
:param org_name: name of organization
:param part_name: name of partition
:param desc: description of partition
:dci_id: DCI ID for inter-DC
:vrf_prof: VRF Profile Name
:service_node_ip: Service Node's Address | 2.559923 | 2.591029 | 0.987995 |
if part_name is None:
part_name = self._part_name
url = self._update_part_url % (org_name, part_name)
res = self._send_request("GET", url, '', 'partition')
if res and res.status_code in self._resp_ok:
return res.json() | def _get_partition(self, org_name, part_name=None) | send get partition request to the DCNM.
:param org_name: name of organization
:param part_name: name of partition | 3.595145 | 3.831105 | 0.938409 |
if part_name is None:
part_name = self._part_name
if vrf_prof is None:
vrf_prof = self.default_vrf_profile
operation = 'PUT'
url = (self._update_part_url % (org_name, part_name))
ip_str = ''
ip_cnt = 0
for ip in static_ip_list:
ip_sub = "$n0" + str(ip_cnt) + "=" + str(ip) + ";"
ip_str = ip_str + ip_sub
ip_cnt = ip_cnt + 1
cfg_args = {
"$vrfName=" + org_name + ':' + part_name + ";"
"$include_serviceNodeIpAddress=" + service_node_ip + ";" + ip_str
}
cfg_args = ';'.join(cfg_args)
payload = {
"partitionName": part_name,
"organizationName": org_name,
"dciExtensionStatus": "Not configured",
"vrfProfileName": vrf_prof,
"vrfName": ':'.join((org_name, part_name)),
"configArg": cfg_args}
res = self._send_request(operation, url, payload, 'partition')
return (res is not None and res.status_code in self._resp_ok) | def update_partition_static_route(self, org_name, part_name,
static_ip_list, vrf_prof=None,
service_node_ip=None) | Send static route update requests to DCNM.
:param org_name: name of organization
:param part_name: name of partition
:static_ip_list: List of static IP addresses
:vrf_prof: VRF Profile
:service_node_ip: Service Node IP address | 3.731119 | 3.767075 | 0.990455 |
url = self._del_org_url % (org_name)
return self._send_request('DELETE', url, '', 'organization') | def _delete_org(self, org_name) | Send organization delete request to DCNM.
:param org_name: name of organization to be deleted | 6.681447 | 7.692314 | 0.868587 |
url = self._del_part % (org_name, partition_name)
return self._send_request('DELETE', url, '', 'partition') | def _delete_partition(self, org_name, partition_name) | Send partition delete request to DCNM.
:param org_name: name of organization
:param partition_name: name of partition | 6.393898 | 7.438419 | 0.859578 |
org_name = network_info.get('organizationName', '')
part_name = network_info.get('partitionName', '')
segment_id = network_info['segmentId']
if 'mobDomainName' in network_info:
vlan_id = network_info['vlanId']
mob_dom_name = network_info['mobDomainName']
url = self._network_mob_url % (org_name, part_name, vlan_id,
mob_dom_name)
else:
url = self._network_url % (org_name, part_name, segment_id)
return self._send_request('DELETE', url, '', 'network') | def _delete_network(self, network_info) | Send network delete request to DCNM.
:param network_info: contains network info to be deleted. | 2.96184 | 2.98249 | 0.993076 |
org_name = network_info.get('organizationName', '')
part_name = network_info.get('partitionName', '')
segment_id = network_info['segmentId']
url = self._network_url % (org_name, part_name, segment_id)
return self._send_request('GET', url, '', 'network') | def _get_network(self, network_info) | Send network get request to DCNM.
:param network_info: contains network info to query. | 3.386567 | 3.371459 | 1.004481 |
expiration_time = self._exp_time
payload = {'expirationTime': expiration_time}
# TODO(padkrish), after testing with certificates, make the
# verify option configurable.
res = requests.post(url_login,
data=jsonutils.dumps(payload),
headers=self._req_headers,
auth=(self._user, self._pwd),
timeout=self.timeout_resp, verify=False)
session_id = ''
if res and res.status_code in self._resp_ok:
session_id = res.json().get('Dcnm-Token')
self._req_headers.update({'Dcnm-Token': session_id}) | def _login_request(self, url_login) | Internal function to send login request. | 5.789779 | 5.561183 | 1.041106 |
requests.post(url_logout,
headers=self._req_headers,
timeout=self.timeout_resp, verify=False) | def _logout_request(self, url_logout) | Internal logout request to DCNM. | 7.637072 | 6.573872 | 1.161731 |
res = None
try:
payload_json = None
if payload and payload != '':
payload_json = jsonutils.dumps(payload)
self._login()
desc_lookup = {'POST': ' creation', 'PUT': ' update',
'DELETE': ' deletion', 'GET': ' get'}
res = requests.request(operation, url, data=payload_json,
headers=self._req_headers,
timeout=self.timeout_resp, verify=False)
desc += desc_lookup.get(operation, operation.lower())
LOG.info("DCNM-send_request: %(desc)s %(url)s %(pld)s",
{'desc': desc, 'url': url, 'pld': payload})
self._logout()
except (requests.HTTPError, requests.Timeout,
requests.ConnectionError) as exc:
LOG.exception('Error during request: %s', exc)
raise dexc.DfaClientRequestFailed(reason=exc)
return res | def _send_request(self, operation, url, payload, desc) | Send request to DCNM. | 3.976036 | 3.576651 | 1.111665 |
these_profiles = self._config_profile_list() or []
profile_list = [q for p in these_profiles for q in
[p.get('profileName')]]
return profile_list | def config_profile_list(self) | Return config profile list from DCNM. | 7.892429 | 5.875801 | 1.343209 |
profile_params = self._config_profile_get(profile_name)
fwd_cli = 'fabric forwarding mode proxy-gateway'
if profile_params and fwd_cli in profile_params['configCommands']:
return 'proxy-gateway'
else:
return 'anycast-gateway' | def config_profile_fwding_mode_get(self, profile_name) | Return forwarding mode of given config profile. | 6.606061 | 6.430925 | 1.027233 |
cfgplist = self.config_profile_list()
cfgname = net_name.partition(':')[2]
cfgtuple = set()
for cfg_prof in cfgplist:
if cfg_prof.startswith('defaultNetwork'):
cfg_alias = (cfg_prof.split('defaultNetwork')[1].
split('Profile')[0])
elif cfg_prof.endswith('Profile'):
cfg_alias = cfg_prof.split('Profile')[0]
else:
cfg_alias = cfg_prof
cfgtuple.update([(cfg_prof, cfg_alias)])
cfgp = [a for a, b in cfgtuple if cfgname == b]
prof = cfgp[0] if cfgp else self.default_cfg_profile
fwd_mod = self.config_profile_fwding_mode_get(prof)
return (prof, fwd_mod) | def get_config_profile_for_network(self, net_name) | Get the list of profiles. | 4.018786 | 4.027565 | 0.99782 |
seg_id = str(network.segmentation_id)
subnet_ip_mask = subnet.cidr.split('/')
gw_ip = subnet.gateway_ip
cfg_args = [
"$segmentId=" + seg_id,
"$netMaskLength=" + subnet_ip_mask[1],
"$gatewayIpAddress=" + gw_ip,
"$networkName=" + network.name,
"$vlanId=0",
"$vrfName=" + tenant_name + ':' + self._part_name
]
cfg_args = ';'.join(cfg_args)
ip_range = ','.join(["%s-%s" % (p['start'], p['end']) for p in
subnet.allocation_pools])
dhcp_scopes = {'ipRange': ip_range,
'subnet': subnet.cidr,
'gateway': gw_ip}
network_info = {"segmentId": seg_id,
"vlanId": "0",
"mobilityDomainId": "None",
"profileName": network.config_profile,
"networkName": network.name,
"configArg": cfg_args,
"organizationName": tenant_name,
"partitionName": self._part_name,
"description": network.name,
"netmaskLength": subnet_ip_mask[1],
"gateway": gw_ip}
if dhcp_range:
network_info["dhcpScope"] = dhcp_scopes
if self._is_iplus:
# Need to add the vrf name to the network info
prof = self._config_profile_get(network.config_profile)
if prof and prof.get('profileSubType') == 'network:universal':
# For universal profile vrf has to e organization:partition
network_info["vrfName"] = ':'.join((tenant_name,
self._part_name))
else:
# Otherwise, it should be left empty.
network_info["vrfName"] = ""
LOG.info("Creating %s network in DCNM.", network_info)
res = self._create_network(network_info)
if res and res.status_code in self._resp_ok:
LOG.info("Created %s network in DCNM.", network_info)
else:
LOG.error("Failed to create %s network in DCNM.",
network_info)
raise dexc.DfaClientRequestFailed(reason=res) | def create_network(self, tenant_name, network, subnet,
dhcp_range=True) | Create network on the DCNM.
:param tenant_name: name of tenant the network belongs to
:param network: network parameters
:param subnet: subnet parameters of the network | 3.671568 | 3.696951 | 0.993134 |
network_info = {}
subnet_ip_mask = subnet.cidr.split('/')
if self._default_md is None:
self._set_default_mobility_domain()
vlan_id = '0'
gw_ip = subnet.gateway_ip
part_name = network.part_name
if not part_name:
part_name = self._part_name
if network.vlan_id:
vlan_id = str(network.vlan_id)
if network.mob_domain_name is not None:
mob_domain_name = network.mob_domain_name
else:
mob_domain_name = self._default_md
else:
mob_domain_name = None
seg_id = str(network.segmentation_id)
seg_str = "$segmentId=" + seg_id
cfg_args = [
seg_str,
"$netMaskLength=" + subnet_ip_mask[1],
"$gatewayIpAddress=" + gw_ip,
"$networkName=" + network.name,
"$vlanId=" + vlan_id,
"$vrfName=" + tenant_name + ':' + part_name
]
cfg_args = ';'.join(cfg_args)
ip_range = ','.join(["%s-%s" % (p['start'], p['end']) for p in
subnet.allocation_pools])
dhcp_scopes = {'ipRange': ip_range,
'subnet': subnet.cidr,
'gateway': gw_ip}
network_info = {"vlanId": vlan_id,
"mobilityDomainId": mob_domain_name,
"profileName": network.config_profile,
"networkName": network.name,
"configArg": cfg_args,
"organizationName": tenant_name,
"partitionName": part_name,
"description": network.name,
"netmaskLength": subnet_ip_mask[1],
"gateway": gw_ip}
if seg_id:
network_info["segmentId"] = seg_id
if dhcp_range:
network_info["dhcpScope"] = dhcp_scopes
if hasattr(subnet, 'secondary_gw'):
network_info["secondaryGateway"] = subnet.secondary_gw
if self._is_iplus:
# Need to add the vrf name to the network info
prof = self._config_profile_get(network.config_profile)
if prof and prof.get('profileSubType') == 'network:universal':
# For universal profile vrf has to e organization:partition
network_info["vrfName"] = ':'.join((tenant_name, part_name))
else:
# Otherwise, it should be left empty.
network_info["vrfName"] = ""
LOG.info("Creating %s network in DCNM.", network_info)
res = self._create_network(network_info)
if res and res.status_code in self._resp_ok:
LOG.info("Created %s network in DCNM.", network_info)
else:
LOG.error("Failed to create %s network in DCNM.",
network_info)
raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res)) | def create_service_network(self, tenant_name, network, subnet,
dhcp_range=True) | Create network on the DCNM.
:param tenant_name: name of tenant the network belongs to
:param network: network parameters
:param subnet: subnet parameters of the network | 3.386333 | 3.386798 | 0.999863 |
seg_id = network.segmentation_id
network_info = {
'organizationName': tenant_name,
'partitionName': self._part_name,
'segmentId': seg_id,
}
LOG.debug("Deleting %s network in DCNM.", network_info)
res = self._delete_network(network_info)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s network in DCNM.", network_info)
else:
LOG.error("Failed to delete %s network in DCNM.",
network_info)
raise dexc.DfaClientRequestFailed(reason=res) | def delete_network(self, tenant_name, network) | Delete network on the DCNM.
:param tenant_name: name of tenant the network belongs to
:param network: object that contains network parameters | 3.662042 | 3.609391 | 1.014587 |
network_info = {}
part_name = network.part_name
if not part_name:
part_name = self._part_name
seg_id = str(network.segmentation_id)
if network.vlan:
vlan_id = str(network.vlan)
if network.mob_domain_name is not None:
mob_domain_name = network.mob_domain_name
else:
# The current way will not work since _default_md is obtained
# during create_service_network. It's preferrable to get it
# during init TODO(padkrish)
if self._default_md is None:
self._set_default_mobility_domain()
mob_domain_name = self._default_md
network_info = {
'organizationName': tenant_name,
'partitionName': part_name,
'mobDomainName': mob_domain_name,
'vlanId': vlan_id,
'segmentId': seg_id,
}
else:
network_info = {
'organizationName': tenant_name,
'partitionName': part_name,
'segmentId': seg_id,
}
LOG.debug("Deleting %s network in DCNM.", network_info)
res = self._delete_network(network_info)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s network in DCNM.", network_info)
else:
LOG.error("Failed to delete %s network in DCNM.",
network_info)
raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res)) | def delete_service_network(self, tenant_name, network) | Delete service network on the DCNM.
:param tenant_name: name of tenant the network belongs to
:param network: object that contains network parameters | 3.298339 | 3.223927 | 1.023081 |
res = self._delete_partition(tenant_name, part_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s partition in DCNM.", part_name)
else:
LOG.error("Failed to delete %(part)s partition in DCNM."
"Response: %(res)s", {'part': part_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res)
res = self._delete_org(tenant_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s organization in DCNM.", tenant_name)
else:
LOG.error("Failed to delete %(org)s organization in DCNM."
"Response: %(res)s", {'org': tenant_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res) | def delete_project(self, tenant_name, part_name) | Delete project on the DCNM.
:param tenant_name: name of project.
:param part_name: name of partition. | 2.205173 | 2.109925 | 1.045143 |
res = self._delete_partition(org_name, partition_name)
if res and res.status_code in self._resp_ok:
LOG.debug("Deleted %s partition in DCNM.", partition_name)
else:
LOG.error("Failed to delete %(part)s partition in DCNM."
"Response: %(res)s",
({'part': partition_name, 'res': res}))
raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res)) | def delete_partition(self, org_name, partition_name) | Send partition delete request to DCNM.
:param partition_name: name of partition to be deleted | 4.302208 | 4.051321 | 1.061927 |
desc = desc or org_name
res = self._create_org(orch_id, org_name, desc)
if res and res.status_code in self._resp_ok:
LOG.debug("Created %s organization in DCNM.", org_name)
else:
LOG.error("Failed to create %(org)s organization in DCNM."
"Response: %(res)s", {'org': org_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res)
self.create_partition(org_name, part_name, dci_id,
self.default_vrf_profile, desc=desc) | def create_project(self, orch_id, org_name, part_name, dci_id, desc=None) | Create project on the DCNM.
:param orch_id: orchestrator ID
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project. | 4.026618 | 4.109191 | 0.979905 |
desc = desc or org_name
res = self._create_or_update_partition(org_name, part_name, desc,
dci_id=dci_id,
service_node_ip=service_node_ip,
vrf_prof=vrf_prof,
operation='PUT')
if res and res.status_code in self._resp_ok:
LOG.debug("Update %s partition in DCNM.", part_name)
else:
LOG.error("Failed to update %(part)s partition in DCNM."
"Response: %(res)s", {'part': part_name, 'res': res})
raise dexc.DfaClientRequestFailed(reason=res) | def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
service_node_ip=UNKNOWN_SRVN_NODE_IP,
vrf_prof=None, desc=None) | Update project on the DCNM.
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project. | 3.163119 | 3.350187 | 0.944162 |
desc = desc or org_name
res = self._create_or_update_partition(org_name, part_name,
desc, dci_id=dci_id,
service_node_ip=service_node_ip,
vrf_prof=vrf_prof)
if res and res.status_code in self._resp_ok:
LOG.debug("Created %s partition in DCNM.", part_name)
else:
LOG.error("Failed to create %(part)s partition in DCNM."
"Response: %(res)s", ({'part': part_name, 'res': res}))
raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res)) | def create_partition(self, org_name, part_name, dci_id, vrf_prof,
service_node_ip=None, desc=None) | Create partition on the DCNM.
:param org_name: name of organization to be created
:param part_name: name of partition to be created
:param dci_id: DCI ID
:vrf_prof: VRF profile for the partition
:param service_node_ip: Specifies the Default route IP address.
:param desc: string that describes organization | 3.14028 | 3.351385 | 0.93701 |
vrf_profile = None
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info("query result from dcnm for partition info is %s",
part_info)
if ("vrfProfileName" in part_info):
vrf_profile = part_info.get("vrfProfileName")
return vrf_profile | def get_partition_vrfProf(self, org_name, part_name=None, part_info=None) | get VRF Profile for the partition from the DCNM.
:param org_name: name of organization
:param part_name: name of partition | 3.231955 | 3.207496 | 1.007625 |
if part_info is None:
part_info = self._get_partition(org_name, part_name)
LOG.info("query result from dcnm for partition info is %s",
part_info)
if part_info is not None and "dciId" in part_info:
return part_info.get("dciId") | def get_partition_dciId(self, org_name, part_name, part_info=None) | get DCI ID for the partition.
:param org_name: name of organization
:param part_name: name of partition | 3.074533 | 3.360333 | 0.914949 |
if org and part:
list_url = self._del_part + '/networks'
list_url = list_url % (org, part)
res = self._send_request('GET', list_url, '', 'networks')
if res and res.status_code in self._resp_ok:
return res.json() | def list_networks(self, org, part) | Return list of networks from DCNM.
:param org: name of organization.
:param part: name of partition. | 4.35416 | 5.270286 | 0.826172 |
try:
res = self._send_request('GET', self._org_url, '', 'organizations')
if res and res.status_code in self._resp_ok:
return res.json()
except dexc.DfaClientRequestFailed:
LOG.error("Failed to send request to DCNM.") | def list_organizations(self) | Return list of organizations from DCNM. | 8.354967 | 6.389884 | 1.30753 |
network_info = {
'organizationName': org,
'partitionName': self._part_name,
'segmentId': segid,
}
res = self._get_network(network_info)
if res and res.status_code in self._resp_ok:
return res.json() | def get_network(self, org, segid) | Return given network from DCNM.
:param org: name of organization.
:param segid: segmentation id of the network. | 4.157736 | 4.732732 | 0.878507 |
url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)
payload = {}
try:
res = self._send_request('GET', url, payload, 'dcnm-version')
if res and res.status_code in self._resp_ok:
return res.json().get('Dcnm-Version')
except dexc.DfaClientRequestFailed as exc:
LOG.error("Failed to get DCNM version.")
sys.exit("ERROR: Failed to connect to DCNM: %s", exc) | def get_version(self) | Get the DCNM version. | 5.310132 | 4.473103 | 1.187125 |
protocol = self.dcnm_protocol
self._org_url = '%s://%s/rest/auto-config/organizations' % (
(protocol, self._ip))
self._create_network_url = ('%s://%s/' % (protocol, self._ip) +
'rest/auto-config/organizations'
'/%s/partitions/%s/networks')
self.host_protocol_url = '%s://%s/' % (protocol, self._ip)
self._create_network_url = self._build_url(
'rest/auto-config/organizations'
'/%s/partitions/%s/networks')
self._cfg_profile_list_url = '%s://%s/rest/auto-config/profiles' % (
(protocol, self._ip))
self._cfg_profile_get_url = self._cfg_profile_list_url + '/%s'
self._global_settings_url = self._build_url(
'rest/auto-config/settings')
self._create_part_url = self._build_url(
'rest/auto-config/organizations/%s/partitions')
self._update_part_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/%s')
self._del_org_url = self._build_url(
'rest/auto-config/organizations/%s')
self._del_part = self._build_url(
'rest/auto-config/organizations/%s/partitions/%s')
self._network_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/'
'%s/networks/segment/%s')
self._network_mob_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/'
'%s/networks/vlan/%s/mobility-domain/%s')
self._segmentid_ranges_url = self._build_url(
'rest/settings/segmentid-ranges')
self._login_url = self._build_url('rest/logon')
self._logout_url = self._build_url('rest/logout') | def fill_urls(self) | This assigns the URL's based on the protocol. | 2.424364 | 2.38872 | 1.014922 |
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
attr.PLURALS.update(plural_mappings)
action_map = {DEVICE: {'get_hosting_device_config': 'GET'}}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.DEVICE_MANAGER,
action_map=action_map) | def get_resources(cls) | Returns Ext Resources. | 4.632615 | 4.374654 | 1.058967 |
global _INSPECTOR
if _INSPECTOR:
return _INSPECTOR
else:
bind = op.get_bind()
_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
return _INSPECTOR | def get_inspector() | Reuse inspector | 4.051996 | 3.7851 | 1.070512 |
output = []
tables = get_tables()
for table in tables:
columns = get_columns(table)
for column in columns:
if column['name'] == 'tenant_id':
output.append((table, column))
return output | def get_data() | Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists. | 3.664904 | 2.307716 | 1.588109 |
return {
sa.Column: ['.'.join([table, 'project_id']) for table in get_tables()],
sa.Index: get_tables()
} | def contract_creation_exceptions() | Special migration for the blueprint to support Keystone V3.
We drop all tenant_id columns and create project_id columns instead. | 12.57741 | 7.747066 | 1.623506 |
admin_context = context.is_admin and context or context.elevated()
dmplugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
if (hosting_device is not None and extensions.is_extension_supported(
dmplugin, CFGAGENT_SCHED)):
agents = dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, [hosting_device['id']], admin_state_up=True,
schedule=True)
if agents:
agent = agents[0]
LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
'message %(method)s [BULK]',
{'agent_type': agent.agent_type,
'topic': CFG_AGENT_L3_ROUTING,
'host': agent.host,
'method': method})
cctxt = self.client.prepare(server=agent.host,
version='1.1')
cctxt.cast(context, method, routers=routers) | def _agent_notification_bulk(self, context, method, routers,
hosting_device, operation) | Notify the Cisco cfg agent handling a particular hosting_device.
A single notification can contain multiple routers. | 4.108169 | 4.137105 | 0.993006 |
if extensions.is_extension_supported(self._l3plugin, L3AGENT_SCHED):
adm_context = (context.is_admin and context or context.elevated())
# This is where hosting device gets scheduled to Cisco cfg agent
self._l3plugin.schedule_routers(adm_context, routers)
self._agent_notification(
context, method, routers, operation, shuffle_agents)
else:
cctxt = self.client.prepare(topics=topics.L3_AGENT, fanout=True)
cctxt.cast(context, method, routers=[r['id'] for r in routers]) | def _notification(self, context, method, routers, operation,
shuffle_agents) | Notify all or individual Cisco cfg agents. | 5.261849 | 4.833843 | 1.088544 |
if routers:
self._notification(context, 'routers_updated', routers, operation,
shuffle_agents) | def routers_updated(self, context, routers, operation=None, data=None,
shuffle_agents=False) | Notify cfg agents about configuration changes to routers.
This includes operations performed on the router like when a
router interface is added or removed. | 4.522285 | 5.538845 | 0.816467 |
self._notification(context, 'router_removed_from_hosting_device',
[router], operation=None, shuffle_agents=False) | def router_removed_from_hosting_device(self, context, router) | Notify cfg agent about router removed from hosting device. | 10.408905 | 8.290003 | 1.255597 |
self._notification(context, 'router_added_to_hosting_device',
[router], operation=None, shuffle_agents=False) | def router_added_to_hosting_device(self, context, router) | Notify cfg agent about router added to hosting device. | 10.531413 | 8.280742 | 1.271796 |
self._agent_notification_bulk(
context, 'router_removed_from_hosting_device', router_ids,
hosting_device, operation=None) | def routers_removed_from_hosting_device(self, context, router_ids,
hosting_device) | Notify cfg agent that routers have been removed from hosting device.
@param: context - information about tenant, user etc
@param: router-ids - list of ids
@param: hosting_device - device hosting the routers | 5.721251 | 5.979904 | 0.956746 |
LOG.debug('Routing_key: %(key)s, body: %(body)s.',
{'key': method.routing_key, 'body': body})
partition_keyword = 'auto-config.organization.partition'
network_keyword = partition_keyword + '.network'
network_create_key = network_keyword + '.create'
network_update_key = network_keyword + '.update'
msg = jsonutils.loads(body)
LOG.debug('_cb_dcnm_msg: RX message: %s', msg)
if not msg:
LOG.debug("error, return")
return
url = msg['link']
url_fields = url.split('/')
pre_project_name = url_fields[4]
pre_partition_name = url_fields[6]
pre_seg_id = url_fields[9]
data = {"project_name": pre_project_name,
"partition_name": pre_partition_name,
"segmentation_id": pre_seg_id}
if network_create_key in method.routing_key or (
network_update_key in method.routing_key):
pri = self._create_pri
event_type = 'dcnm.network.create'
else:
pri = self._delete_pri
event_type = 'dcnm.network.delete'
if self._pq is not None:
payload = (event_type, data)
self._pq.put((pri, time.ctime, payload)) | def _cb_dcnm_msg(self, method, body) | Callback function to process DCNM network creation/update/deletion
message received by AMQP.
It also communicates with DCNM to extract info for CPNR record
insertion/deletion.
:param pika.channel.Channel ch: The channel instance.
:param pika.Spec.Basic.Deliver method: The basic deliver method
which includes routing key.
:param pika.Spec.BasicProperties properties: properties
:param str body: The message body. | 3.762241 | 3.688064 | 1.020113 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.