sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def update_port_precommit(self, context):
    """Update port pre-database transaction commit event.

    Computes the VLAN/VXLAN segments for both the current and the
    original port bindings, then updates the driver's database state:
    on VM migration or when the port status is DOWN, the original
    binding's DB entries are removed; otherwise, for supported device
    owners, DB entries for the current binding are created.

    :param context: ML2 port context for this update event.
    """
    vlan_segment, vxlan_segment = self._get_segments(
        context.top_bound_segment, context.bottom_bound_segment)
    orig_vlan_segment, orig_vxlan_segment = self._get_segments(
        context.original_top_bound_segment,
        context.original_bottom_bound_segment)
    if (self._is_vm_migrating(context, vlan_segment, orig_vlan_segment) or
            self._is_status_down(context.current)):
        # Tear down DB state for the original binding; vni is 0 when
        # the original binding had no VXLAN segment.
        vni = (self._port_action_vxlan(
            context.original, orig_vxlan_segment, self._delete_nve_db)
            if orig_vxlan_segment else 0)
        self._port_action_vlan(context.original, orig_vlan_segment,
                               self._delete_nxos_db, vni)
    elif self._is_supported_deviceowner(context.current):
        vni = self._port_action_vxlan(context.current, vxlan_segment,
            self._configure_nve_db) if vxlan_segment else 0
        self._port_action_vlan(context.current, vlan_segment,
                               self._configure_nxos_db, vni)
def update_port_postcommit(self, context):
    """Update port non-database commit event.

    Mirrors update_port_precommit but performs the device-facing
    actions: on VM migration or DOWN status the original binding's
    NVE/switch entries are removed; otherwise the current binding is
    programmed on the Nexus switches.

    :param context: ML2 port context for this update event.
    :raises excep.NexusConnectFailed: when target switches exist for
        the port but none are currently active (switch down or replay
        in progress).
    """
    vlan_segment, vxlan_segment = self._get_segments(
        context.top_bound_segment, context.bottom_bound_segment)
    orig_vlan_segment, orig_vxlan_segment = self._get_segments(
        context.original_top_bound_segment,
        context.original_bottom_bound_segment)
    if (self._is_vm_migrating(context, vlan_segment, orig_vlan_segment) or
            self._is_status_down(context.current)):
        # vni is 0 when the original binding had no VXLAN segment.
        vni = (self._port_action_vxlan(
            context.original, orig_vxlan_segment,
            self._delete_nve_member) if orig_vxlan_segment else 0)
        self._port_action_vlan(context.original, orig_vlan_segment,
                               self._delete_switch_entry, vni)
    elif self._is_supported_deviceowner(context.current):
        # Baremetal ports carry their own switch list; others are
        # resolved through the binding host id.
        if nexus_help.is_baremetal(context.current):
            all_switches, active_switches = (
                self._get_baremetal_switches(context.current))
        else:
            host_id = context.current.get(bc.portbindings.HOST_ID)
            all_switches, active_switches = (
                self._get_host_switches(host_id))
        # if switches not active but host_id is valid
        if not active_switches and all_switches:
            raise excep.NexusConnectFailed(
                nexus_host=all_switches[0], config="None",
                exc="Update Port Failed: Nexus Switch "
                "is down or replay in progress")
        vni = self._port_action_vxlan(context.current, vxlan_segment,
            self._configure_nve_member) if vxlan_segment else 0
        self._port_action_vlan(context.current, vlan_segment,
                               self._configure_port_entries, vni)
def delete_port_precommit(self, context):
    """Delete port pre-database commit event.

    Removes the driver database entries for a deleted port.  Ports
    whose device owner is not handled by this driver are skipped.

    :param context: ML2 port context for this delete event.
    """
    if not self._is_supported_deviceowner(context.current):
        return
    vlan_segment, vxlan_segment = self._get_segments(
        context.top_bound_segment,
        context.bottom_bound_segment)
    if vxlan_segment:
        vni = self._port_action_vxlan(context.current, vxlan_segment,
                                      self._delete_nve_db)
    else:
        vni = 0
    self._port_action_vlan(context.current, vlan_segment,
                           self._delete_nxos_db, vni)
def delete_port_postcommit(self, context):
    """Delete port non-database commit event.

    Removes the switch/NVE configuration for a deleted port.  Ports
    whose device owner is not handled by this driver are skipped.

    :param context: ML2 port context for this delete event.
    """
    if not self._is_supported_deviceowner(context.current):
        return
    vlan_segment, vxlan_segment = self._get_segments(
        context.top_bound_segment,
        context.bottom_bound_segment)
    if vxlan_segment:
        vni = self._port_action_vxlan(context.current, vxlan_segment,
                                      self._delete_nve_member)
    else:
        vni = 0
    self._port_action_vlan(context.current, vlan_segment,
                           self._delete_switch_entry, vni)
def _detect_iplus(self):
    """Check the DCNM version and determine if it's for iplus.

    Parses ``self._cur_ver`` and ``self._base_ver`` (expected format
    ``major.minor(patch)``) and sets ``self._is_iplus`` to True when
    the current version is greater than or equal to the base version.
    """
    # Raw string so the regex escapes are not mangled by Python string
    # escaping (the original used a plain string, producing invalid
    # escape sequences); compile once and use it for both matches
    # (the original compiled the pattern and discarded the result).
    ver_re = re.compile(r"([0-9]+)\.([0-9]+)\((.*)\)")
    v1 = ver_re.match(self._cur_ver)
    v2 = ver_re.match(self._base_ver)
    if int(v1.group(1)) > int(v2.group(1)):
        self._is_iplus = True
    elif int(v1.group(1)) == int(v2.group(1)):
        if int(v1.group(2)) > int(v2.group(2)):
            self._is_iplus = True
        elif int(v1.group(2)) == int(v2.group(2)):
            # NOTE(review): patch levels are compared lexicographically,
            # matching the original behavior.
            self._is_iplus = v1.group(3) >= v2.group(3)
    LOG.info("DCNM version: %(cur_ver)s, iplus: %(is_iplus)s",
             {'cur_ver': self._cur_ver, 'is_iplus': self._is_iplus})
def get_segmentid_range(self, orchestrator_id):
    """Fetch the segment-id range for an orchestrator from DCNM.

    :param orchestrator_id: orchestrator identifier
    :returns: decoded JSON response body on success, otherwise None.
    """
    url = '/'.join((self._segmentid_ranges_url, orchestrator_id))
    resp = self._send_request('GET', url, None, 'segment-id range')
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
    return None
def set_segmentid_range(self, orchestrator_id, segid_min, segid_max):
    """Set segment id range in DCNM.

    :param orchestrator_id: orchestrator identifier
    :param segid_min: lower bound of the segment id range
    :param segid_max: upper bound of the segment id range
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the request
        or no response is received.
    """
    url = self._segmentid_ranges_url
    payload = {'orchestratorId': orchestrator_id,
               'segmentIdRanges': "%s-%s" % (segid_min, segid_max)}
    res = self._send_request('POST', url, payload, 'segment-id range')
    if not (res and res.status_code in self._resp_ok):
        # res may be None here (no response at all); the original
        # crashed with AttributeError on res.text in that case.
        LOG.error("Failed to set segment id range for orchestrator "
                  "%(orch)s on DCNM: %(text)s",
                  {'orch': orchestrator_id,
                   'text': res.text if res is not None else 'no response'})
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
def _set_default_cfg_profile(self):
    """Set default network config profile.

    Checks whether the configured default profile exists on this DCNM;
    when it does not, falls back to the universal (iplus) or the IPv4
    profile supported by newer versions.  On a request failure the
    IPv4 profile is used unconditionally.
    """
    try:
        available = self.config_profile_list()
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to send request to DCNM.")
        self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'
        return
    if self.default_cfg_profile not in available:
        if self._is_iplus:
            self.default_cfg_profile = 'defaultNetworkUniversalEfProfile'
        else:
            self.default_cfg_profile = 'defaultNetworkIpv4EfProfile'
Check whether the default_cfg_profile value exist in the current
version of DCNM. If not, set it to new default value which is supported
by latest version. | entailment |
def _create_network(self, network_info):
    """Send create network request to DCNM.

    :param network_info: network parameters to be created on DCNM;
        must contain 'organizationName' and 'partitionName'.
    :returns: the response object from the request.
    """
    url = self._create_network_url % (network_info['organizationName'],
                                      network_info['partitionName'])
    LOG.info('url %(url)s payload %(payload)s',
             {'url': url, 'payload': network_info})
    return self._send_request('POST', url, network_info, 'network')
:param network_info: network parameters to be created on DCNM | entailment |
def _config_profile_get(self, thisprofile):
"""Get information of a config profile from DCNM.
:param thisprofile: network config profile in request
"""
url = self._cfg_profile_get_url % (thisprofile)
payload = {}
res = self._send_request('GET', url, payload, 'config-profile')
if res and res.status_code in self._resp_ok:
return res.json() | Get information of a config profile from DCNM.
:param thisprofile: network config profile in request | entailment |
def _config_profile_list(self):
    """Get list of supported config profiles from DCNM.

    :returns: decoded JSON body on success; None on failure or when
        the request raises DfaClientRequestFailed.
    """
    try:
        resp = self._send_request('GET', self._cfg_profile_list_url, {},
                                  'config-profile')
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to send request to DCNM.")
        return None
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
    return None
def _get_settings(self):
"""Get global mobility domain from DCNM."""
url = self._global_settings_url
payload = {}
res = self._send_request('GET', url, payload, 'settings')
if res and res.status_code in self._resp_ok:
return res.json() | Get global mobility domain from DCNM. | entailment |
def _create_org(self, orch_id, name, desc):
"""Create organization on the DCNM.
:param orch_id: orchestrator ID
:param name: Name of organization
:param desc: Description of organization
"""
url = self._org_url
payload = {
"organizationName": name,
"description": name if len(desc) == 0 else desc,
"orchestrationSource": orch_id}
return self._send_request('POST', url, payload, 'organization') | Create organization on the DCNM.
:param orch_id: orchestrator ID
:param name: Name of organization
:param desc: Description of organization | entailment |
def _create_or_update_partition(self, org_name, part_name, desc,
                                dci_id=UNKNOWN_DCI_ID, vrf_prof=None,
                                service_node_ip=UNKNOWN_SRVN_NODE_IP,
                                operation='POST'):
    """Send create or update partition request to the DCNM.

    Parameters still at their "unknown" defaults are re-read from the
    partition's current state on DCNM so an update does not clobber
    existing values.

    :param org_name: name of organization
    :param part_name: name of partition; defaults to self._part_name
    :param desc: description of partition; part_name is used when empty
    :param dci_id: DCI ID for inter-DC
    :param vrf_prof: VRF profile name
    :param service_node_ip: service node's address
    :param operation: 'POST' to create, 'PUT' to update
    :returns: the response object from the request.
    """
    if part_name is None:
        part_name = self._part_name
    # Fetch the partition once and fill in any unspecified values.
    if vrf_prof is None or dci_id == UNKNOWN_DCI_ID or (
            service_node_ip == UNKNOWN_SRVN_NODE_IP):
        part_info = self._get_partition(org_name, part_name)
        if vrf_prof is None:
            vrf_prof = self.get_partition_vrfProf(org_name, part_name,
                                                  part_info=part_info)
        if dci_id == UNKNOWN_DCI_ID:
            dci_id = self.get_partition_dciId(org_name, part_name,
                                              part_info=part_info)
        if service_node_ip == UNKNOWN_SRVN_NODE_IP:
            service_node_ip = self.get_partition_serviceNodeIp(
                org_name, part_name, part_info=part_info)
    url = ((self._create_part_url % (org_name)) if operation == 'POST' else
           self._update_part_url % (org_name, part_name))
    payload = {
        "partitionName": part_name,
        "description": part_name if len(desc) == 0 else desc,
        "serviceNodeIpAddress": service_node_ip,
        "organizationName": org_name}
    # Check the DCNM version and find out whether it is need to have
    # extra payload for the new version when creating/updating a partition.
    if self._is_iplus:
        # Need to add extra payload for the new version.  DCI extension
        # is enabled only for a non-zero DCI id.
        enable_dci = "true" if dci_id and int(dci_id) != 0 else "false"
        extra_payload = {
            "vrfProfileName": vrf_prof,
            "vrfName": ':'.join((org_name, part_name)),
            "dciId": dci_id,
            "enableDCIExtension": enable_dci}
        payload.update(extra_payload)
    return self._send_request(operation, url, payload, 'partition')
:param org_name: name of organization
:param part_name: name of partition
:param desc: description of partition
:dci_id: DCI ID for inter-DC
:vrf_prof: VRF Profile Name
:service_node_ip: Service Node's Address | entailment |
def _get_partition(self, org_name, part_name=None):
"""send get partition request to the DCNM.
:param org_name: name of organization
:param part_name: name of partition
"""
if part_name is None:
part_name = self._part_name
url = self._update_part_url % (org_name, part_name)
res = self._send_request("GET", url, '', 'partition')
if res and res.status_code in self._resp_ok:
return res.json() | send get partition request to the DCNM.
:param org_name: name of organization
:param part_name: name of partition | entailment |
def update_partition_static_route(self, org_name, part_name,
                                  static_ip_list, vrf_prof=None,
                                  service_node_ip=None):
    """Send static route update requests to DCNM.

    :param org_name: name of organization
    :param part_name: name of partition; defaults to self._part_name
    :param static_ip_list: list of static IP addresses
    :param vrf_prof: VRF profile; defaults to self.default_vrf_profile
    :param service_node_ip: service node IP address
    :returns: True when DCNM accepted the update, False otherwise.
    """
    if part_name is None:
        part_name = self._part_name
    if vrf_prof is None:
        vrf_prof = self.default_vrf_profile
    url = self._update_part_url % (org_name, part_name)
    # The config args are a single ';'-separated, ';'-terminated string.
    # The original built it inside a one-element set literal and then
    # ';'.join()-ed the set, which would have nondeterministic ordering
    # had a second element ever been added; build it explicitly instead.
    args = ["$vrfName=%s:%s" % (org_name, part_name),
            "$include_serviceNodeIpAddress=%s" % service_node_ip]
    args.extend("$n0%d=%s" % (idx, ip)
                for idx, ip in enumerate(static_ip_list))
    cfg_args = ';'.join(args) + ';'
    payload = {
        "partitionName": part_name,
        "organizationName": org_name,
        "dciExtensionStatus": "Not configured",
        "vrfProfileName": vrf_prof,
        "vrfName": ':'.join((org_name, part_name)),
        "configArg": cfg_args}
    res = self._send_request('PUT', url, payload, 'partition')
    return res is not None and res.status_code in self._resp_ok
:param org_name: name of organization
:param part_name: name of partition
:static_ip_list: List of static IP addresses
:vrf_prof: VRF Profile
:service_node_ip: Service Node IP address | entailment |
def _delete_org(self, org_name):
"""Send organization delete request to DCNM.
:param org_name: name of organization to be deleted
"""
url = self._del_org_url % (org_name)
return self._send_request('DELETE', url, '', 'organization') | Send organization delete request to DCNM.
:param org_name: name of organization to be deleted | entailment |
def _delete_partition(self, org_name, partition_name):
"""Send partition delete request to DCNM.
:param org_name: name of organization
:param partition_name: name of partition
"""
url = self._del_part % (org_name, partition_name)
return self._send_request('DELETE', url, '', 'partition') | Send partition delete request to DCNM.
:param org_name: name of organization
:param partition_name: name of partition | entailment |
def _delete_network(self, network_info):
"""Send network delete request to DCNM.
:param network_info: contains network info to be deleted.
"""
org_name = network_info.get('organizationName', '')
part_name = network_info.get('partitionName', '')
segment_id = network_info['segmentId']
if 'mobDomainName' in network_info:
vlan_id = network_info['vlanId']
mob_dom_name = network_info['mobDomainName']
url = self._network_mob_url % (org_name, part_name, vlan_id,
mob_dom_name)
else:
url = self._network_url % (org_name, part_name, segment_id)
return self._send_request('DELETE', url, '', 'network') | Send network delete request to DCNM.
:param network_info: contains network info to be deleted. | entailment |
def _get_network(self, network_info):
"""Send network get request to DCNM.
:param network_info: contains network info to query.
"""
org_name = network_info.get('organizationName', '')
part_name = network_info.get('partitionName', '')
segment_id = network_info['segmentId']
url = self._network_url % (org_name, part_name, segment_id)
return self._send_request('GET', url, '', 'network') | Send network get request to DCNM.
:param network_info: contains network info to query. | entailment |
def _login_request(self, url_login):
    """Internal function to send login request.

    On success the returned 'Dcnm-Token' session id is stored in the
    shared request headers for subsequent calls; on failure an empty
    token is stored.

    :param url_login: full URL of the DCNM logon endpoint
    """
    expiration_time = self._exp_time
    payload = {'expirationTime': expiration_time}
    # TODO(padkrish), after testing with certificates, make the
    # verify option configurable.
    res = requests.post(url_login,
                        data=jsonutils.dumps(payload),
                        headers=self._req_headers,
                        auth=(self._user, self._pwd),
                        timeout=self.timeout_resp, verify=False)
    session_id = ''
    if res and res.status_code in self._resp_ok:
        session_id = res.json().get('Dcnm-Token')
    self._req_headers.update({'Dcnm-Token': session_id})
def _logout_request(self, url_logout):
    """Internal logout request to DCNM.

    Best-effort: the response is intentionally ignored.

    :param url_logout: full URL of the DCNM logout endpoint
    """
    requests.post(url_logout,
                  headers=self._req_headers,
                  timeout=self.timeout_resp, verify=False)
def _send_request(self, operation, url, payload, desc):
    """Send a REST request to DCNM, logging in and out around it.

    :param operation: HTTP verb ('GET', 'POST', 'PUT', 'DELETE')
    :param url: request URL
    :param payload: body object to JSON-encode; may be None or ''
    :param desc: short description of the request used for logging
    :returns: the response object, or None if no request was made
    :raises dexc.DfaClientRequestFailed: on HTTP/timeout/connection
        errors.
    """
    res = None
    try:
        payload_json = jsonutils.dumps(payload) if payload else None
        self._login()
        try:
            res = requests.request(operation, url, data=payload_json,
                                   headers=self._req_headers,
                                   timeout=self.timeout_resp, verify=False)
        finally:
            # Always release the DCNM session; the original skipped
            # logout when the request raised, leaking the session.
            self._logout()
        desc_lookup = {'POST': ' creation', 'PUT': ' update',
                       'DELETE': ' deletion', 'GET': ' get'}
        desc += desc_lookup.get(operation, operation.lower())
        LOG.info("DCNM-send_request: %(desc)s %(url)s %(pld)s",
                 {'desc': desc, 'url': url, 'pld': payload})
    except (requests.HTTPError, requests.Timeout,
            requests.ConnectionError) as exc:
        LOG.exception('Error during request: %s', exc)
        raise dexc.DfaClientRequestFailed(reason=exc)
    return res
def config_profile_list(self):
    """Return the list of config profile names known to DCNM.

    :returns: list of 'profileName' values (possibly containing None
        for malformed entries); empty list when DCNM returned nothing.
    """
    profiles = self._config_profile_list() or []
    # The original used a needlessly nested comprehension
    # ([q for p in ... for q in [p.get(...)]]); this is the direct form.
    return [profile.get('profileName') for profile in profiles]
def config_profile_fwding_mode_get(self, profile_name):
    """Return forwarding mode of given config profile.

    :param profile_name: config profile to inspect
    :returns: 'proxy-gateway' when the profile's CLI commands enable
        fabric proxy-gateway forwarding, else 'anycast-gateway'.
    """
    params = self._config_profile_get(profile_name)
    proxy_cli = 'fabric forwarding mode proxy-gateway'
    has_proxy = bool(params) and proxy_cli in params['configCommands']
    return 'proxy-gateway' if has_proxy else 'anycast-gateway'
def get_config_profile_for_network(self, net_name):
    """Return (profile, forwarding-mode) for a network name.

    The text after ':' in the network name is matched against each
    profile's alias ('defaultNetworkXyzProfile' -> 'Xyz',
    'xyzProfile' -> 'xyz', otherwise the full name); the configured
    default profile is used when nothing matches.
    """
    wanted = net_name.partition(':')[2]
    chosen = self.default_cfg_profile
    for prof_name in self.config_profile_list():
        if prof_name.startswith('defaultNetwork'):
            alias = (prof_name.split('defaultNetwork')[1].
                     split('Profile')[0])
        elif prof_name.endswith('Profile'):
            alias = prof_name.split('Profile')[0]
        else:
            alias = prof_name
        if alias == wanted:
            chosen = prof_name
            break
    return (chosen, self.config_profile_fwding_mode_get(chosen))
def create_network(self, tenant_name, network, subnet,
                   dhcp_range=True):
    """Create network on the DCNM.

    Builds the config-profile arguments and (optionally) a DHCP scope
    from the subnet, then issues the create request.

    :param tenant_name: name of tenant the network belongs to
    :param network: network parameters
    :param subnet: subnet parameters of the network
    :param dhcp_range: include a DHCP scope in the request when True
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the create.
    """
    seg_id = str(network.segmentation_id)
    subnet_ip_mask = subnet.cidr.split('/')
    gw_ip = subnet.gateway_ip
    cfg_args = [
        "$segmentId=" + seg_id,
        "$netMaskLength=" + subnet_ip_mask[1],
        "$gatewayIpAddress=" + gw_ip,
        "$networkName=" + network.name,
        "$vlanId=0",
        "$vrfName=" + tenant_name + ':' + self._part_name
    ]
    cfg_args = ';'.join(cfg_args)
    ip_range = ','.join(["%s-%s" % (p['start'], p['end']) for p in
                         subnet.allocation_pools])
    dhcp_scopes = {'ipRange': ip_range,
                   'subnet': subnet.cidr,
                   'gateway': gw_ip}
    network_info = {"segmentId": seg_id,
                    "vlanId": "0",
                    "mobilityDomainId": "None",
                    "profileName": network.config_profile,
                    "networkName": network.name,
                    "configArg": cfg_args,
                    "organizationName": tenant_name,
                    "partitionName": self._part_name,
                    "description": network.name,
                    "netmaskLength": subnet_ip_mask[1],
                    "gateway": gw_ip}
    if dhcp_range:
        network_info["dhcpScope"] = dhcp_scopes
    if self._is_iplus:
        # Need to add the vrf name to the network info
        prof = self._config_profile_get(network.config_profile)
        if prof and prof.get('profileSubType') == 'network:universal':
            # For universal profile vrf has to be organization:partition
            network_info["vrfName"] = ':'.join((tenant_name,
                                                self._part_name))
        else:
            # Otherwise, it should be left empty.
            network_info["vrfName"] = ""
    LOG.info("Creating %s network in DCNM.", network_info)
    res = self._create_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.info("Created %s network in DCNM.", network_info)
    else:
        LOG.error("Failed to create %s network in DCNM.",
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=res)
:param tenant_name: name of tenant the network belongs to
:param network: network parameters
:param subnet: subnet parameters of the network | entailment |
def create_service_network(self, tenant_name, network, subnet,
                           dhcp_range=True):
    """Create a service network on the DCNM.

    Unlike create_network, a service network may carry a VLAN id with
    a mobility domain, and may target a non-default partition.

    :param tenant_name: name of tenant the network belongs to
    :param network: network parameters (may carry vlan_id,
        mob_domain_name and part_name)
    :param subnet: subnet parameters of the network
    :param dhcp_range: include a DHCP scope in the request when True
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the create.
    """
    network_info = {}
    subnet_ip_mask = subnet.cidr.split('/')
    if self._default_md is None:
        self._set_default_mobility_domain()
    vlan_id = '0'
    gw_ip = subnet.gateway_ip
    part_name = network.part_name
    if not part_name:
        part_name = self._part_name
    # VLAN-based networks need a mobility domain; fall back to the
    # default one when the network does not specify it.
    if network.vlan_id:
        vlan_id = str(network.vlan_id)
        if network.mob_domain_name is not None:
            mob_domain_name = network.mob_domain_name
        else:
            mob_domain_name = self._default_md
    else:
        mob_domain_name = None
    seg_id = str(network.segmentation_id)
    seg_str = "$segmentId=" + seg_id
    cfg_args = [
        seg_str,
        "$netMaskLength=" + subnet_ip_mask[1],
        "$gatewayIpAddress=" + gw_ip,
        "$networkName=" + network.name,
        "$vlanId=" + vlan_id,
        "$vrfName=" + tenant_name + ':' + part_name
    ]
    cfg_args = ';'.join(cfg_args)
    ip_range = ','.join(["%s-%s" % (p['start'], p['end']) for p in
                         subnet.allocation_pools])
    dhcp_scopes = {'ipRange': ip_range,
                   'subnet': subnet.cidr,
                   'gateway': gw_ip}
    network_info = {"vlanId": vlan_id,
                    "mobilityDomainId": mob_domain_name,
                    "profileName": network.config_profile,
                    "networkName": network.name,
                    "configArg": cfg_args,
                    "organizationName": tenant_name,
                    "partitionName": part_name,
                    "description": network.name,
                    "netmaskLength": subnet_ip_mask[1],
                    "gateway": gw_ip}
    if seg_id:
        network_info["segmentId"] = seg_id
    if dhcp_range:
        network_info["dhcpScope"] = dhcp_scopes
    if hasattr(subnet, 'secondary_gw'):
        network_info["secondaryGateway"] = subnet.secondary_gw
    if self._is_iplus:
        # Need to add the vrf name to the network info
        prof = self._config_profile_get(network.config_profile)
        if prof and prof.get('profileSubType') == 'network:universal':
            # For universal profile vrf has to be organization:partition
            network_info["vrfName"] = ':'.join((tenant_name, part_name))
        else:
            # Otherwise, it should be left empty.
            network_info["vrfName"] = ""
    LOG.info("Creating %s network in DCNM.", network_info)
    res = self._create_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.info("Created %s network in DCNM.", network_info)
    else:
        LOG.error("Failed to create %s network in DCNM.",
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
:param tenant_name: name of tenant the network belongs to
:param network: network parameters
:param subnet: subnet parameters of the network | entailment |
def delete_network(self, tenant_name, network):
    """Delete network on the DCNM.

    :param tenant_name: name of tenant the network belongs to
    :param network: object that contains network parameters
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the delete.
    """
    network_info = {
        'organizationName': tenant_name,
        'partitionName': self._part_name,
        'segmentId': network.segmentation_id,
    }
    LOG.debug("Deleting %s network in DCNM.", network_info)
    res = self._delete_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s network in DCNM.", network_info)
        return
    LOG.error("Failed to delete %s network in DCNM.",
              network_info)
    raise dexc.DfaClientRequestFailed(reason=res)
:param tenant_name: name of tenant the network belongs to
:param network: object that contains network parameters | entailment |
def delete_service_network(self, tenant_name, network):
    """Delete service network on the DCNM.

    VLAN-based networks are addressed by vlan id and mobility domain;
    all others by segment id.

    :param tenant_name: name of tenant the network belongs to
    :param network: object that contains network parameters
    :raises dexc.DfaClientRequestFailed: when DCNM rejects the delete.
    """
    network_info = {}
    part_name = network.part_name
    if not part_name:
        part_name = self._part_name
    seg_id = str(network.segmentation_id)
    if network.vlan:
        vlan_id = str(network.vlan)
        if network.mob_domain_name is not None:
            mob_domain_name = network.mob_domain_name
        else:
            # The current way will not work since _default_md is obtained
            # during create_service_network. It's preferrable to get it
            # during init TODO(padkrish)
            if self._default_md is None:
                self._set_default_mobility_domain()
            mob_domain_name = self._default_md
        network_info = {
            'organizationName': tenant_name,
            'partitionName': part_name,
            'mobDomainName': mob_domain_name,
            'vlanId': vlan_id,
            'segmentId': seg_id,
        }
    else:
        network_info = {
            'organizationName': tenant_name,
            'partitionName': part_name,
            'segmentId': seg_id,
        }
    LOG.debug("Deleting %s network in DCNM.", network_info)
    res = self._delete_network(network_info)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s network in DCNM.", network_info)
    else:
        LOG.error("Failed to delete %s network in DCNM.",
                  network_info)
        raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
:param tenant_name: name of tenant the network belongs to
:param network: object that contains network parameters | entailment |
def delete_project(self, tenant_name, part_name):
    """Delete project on the DCNM: partition first, then organization.

    :param tenant_name: name of project/organization.
    :param part_name: name of partition.
    :raises dexc.DfaClientRequestFailed: when either delete fails.
    """
    res = self._delete_partition(tenant_name, part_name)
    if not (res and res.status_code in self._resp_ok):
        LOG.error("Failed to delete %(part)s partition in DCNM."
                  "Response: %(res)s", {'part': part_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
    LOG.debug("Deleted %s partition in DCNM.", part_name)
    res = self._delete_org(tenant_name)
    if not (res and res.status_code in self._resp_ok):
        LOG.error("Failed to delete %(org)s organization in DCNM."
                  "Response: %(res)s", {'org': tenant_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
    LOG.debug("Deleted %s organization in DCNM.", tenant_name)
:param tenant_name: name of project.
:param part_name: name of partition. | entailment |
def delete_partition(self, org_name, partition_name):
    """Send partition delete request to DCNM.

    :param org_name: name of the owning organization
    :param partition_name: name of partition to be deleted
    :raises dexc.DfaClientRequestFailed: when the delete fails.
    """
    res = self._delete_partition(org_name, partition_name)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Deleted %s partition in DCNM.", partition_name)
        return
    LOG.error("Failed to delete %(part)s partition in DCNM."
              "Response: %(res)s",
              ({'part': partition_name, 'res': res}))
    raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
:param partition_name: name of partition to be deleted | entailment |
def create_project(self, orch_id, org_name, part_name, dci_id, desc=None):
    """Create a project (organization + partition) on the DCNM.

    :param orch_id: orchestrator ID
    :param org_name: name of organization.
    :param part_name: name of partition.
    :param dci_id: Data Center interconnect id.
    :param desc: description of project; the org name is used when
        omitted.
    :raises dexc.DfaClientRequestFailed: when organization creation
        fails (partition creation raises on its own).
    """
    desc = desc or org_name
    res = self._create_org(orch_id, org_name, desc)
    if not (res and res.status_code in self._resp_ok):
        LOG.error("Failed to create %(org)s organization in DCNM."
                  "Response: %(res)s", {'org': org_name, 'res': res})
        raise dexc.DfaClientRequestFailed(reason=res)
    LOG.debug("Created %s organization in DCNM.", org_name)
    self.create_partition(org_name, part_name, dci_id,
                          self.default_vrf_profile, desc=desc)
:param orch_id: orchestrator ID
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project. | entailment |
def update_project(self, org_name, part_name, dci_id=UNKNOWN_DCI_ID,
                   service_node_ip=UNKNOWN_SRVN_NODE_IP,
                   vrf_prof=None, desc=None):
    """Update a project's partition on the DCNM.

    :param org_name: name of organization.
    :param part_name: name of partition.
    :param dci_id: Data Center interconnect id.
    :param service_node_ip: service node address.
    :param vrf_prof: VRF profile name.
    :param desc: description of project; the org name is used when
        omitted.
    :raises dexc.DfaClientRequestFailed: when the update fails.
    """
    res = self._create_or_update_partition(org_name, part_name,
                                           desc or org_name,
                                           dci_id=dci_id,
                                           service_node_ip=service_node_ip,
                                           vrf_prof=vrf_prof,
                                           operation='PUT')
    if res and res.status_code in self._resp_ok:
        LOG.debug("Update %s partition in DCNM.", part_name)
        return
    LOG.error("Failed to update %(part)s partition in DCNM."
              "Response: %(res)s", {'part': part_name, 'res': res})
    raise dexc.DfaClientRequestFailed(reason=res)
:param org_name: name of organization.
:param part_name: name of partition.
:param dci_id: Data Center interconnect id.
:param desc: description of project. | entailment |
def create_partition(self, org_name, part_name, dci_id, vrf_prof,
                     service_node_ip=None, desc=None):
    """Create partition on the DCNM.

    :param org_name: name of the owning organization
    :param part_name: name of partition to be created
    :param dci_id: DCI ID
    :param vrf_prof: VRF profile for the partition
    :param service_node_ip: default route IP address, if any
    :param desc: description; the org name is used when omitted
    :raises dexc.DfaClientRequestFailed: when creation fails.
    """
    res = self._create_or_update_partition(org_name, part_name,
                                           desc or org_name,
                                           dci_id=dci_id,
                                           service_node_ip=service_node_ip,
                                           vrf_prof=vrf_prof)
    if res and res.status_code in self._resp_ok:
        LOG.debug("Created %s partition in DCNM.", part_name)
        return
    LOG.error("Failed to create %(part)s partition in DCNM."
              "Response: %(res)s", ({'part': part_name, 'res': res}))
    raise dexc.DfaClientRequestFailed(reason=self._failure_msg(res))
:param org_name: name of organization to be created
:param part_name: name of partition to be created
:param dci_id: DCI ID
:vrf_prof: VRF profile for the partition
:param service_node_ip: Specifies the Default route IP address.
:param desc: string that describes organization | entailment |
def get_partition_vrfProf(self, org_name, part_name=None, part_info=None):
    """Get VRF Profile for the partition from the DCNM.

    :param org_name: name of organization
    :param part_name: name of partition
    :param part_info: previously fetched partition info, if available
    :returns: the VRF profile name, or None when unknown.
    """
    vrf_profile = None
    if part_info is None:
        part_info = self._get_partition(org_name, part_name)
        LOG.info("query result from dcnm for partition info is %s",
                 part_info)
    # _get_partition returns None on failure; guard against it the
    # same way get_partition_dciId does (the original raised a
    # TypeError on the membership test here).
    if part_info is not None and "vrfProfileName" in part_info:
        vrf_profile = part_info.get("vrfProfileName")
    return vrf_profile
:param org_name: name of organization
:param part_name: name of partition | entailment |
def get_partition_dciId(self, org_name, part_name, part_info=None):
    """Get DCI ID for the partition.

    :param org_name: name of organization
    :param part_name: name of partition
    :param part_info: previously fetched partition info, if available
    :returns: the DCI id, or None when unknown.
    """
    if part_info is None:
        part_info = self._get_partition(org_name, part_name)
        LOG.info("query result from dcnm for partition info is %s",
                 part_info)
    if part_info is not None and "dciId" in part_info:
        return part_info.get("dciId")
    return None
def list_networks(self, org, part):
    """Return list of networks in org/part from DCNM.

    :param org: name of organization.
    :param part: name of partition.
    :returns: decoded JSON body on success, otherwise None.
    """
    if not (org and part):
        return None
    url = (self._del_part + '/networks') % (org, part)
    resp = self._send_request('GET', url, '', 'networks')
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
    return None
def list_organizations(self):
    """Return list of organizations from DCNM.

    :returns: decoded JSON body on success; None on failure or when
        the request raises DfaClientRequestFailed.
    """
    try:
        resp = self._send_request('GET', self._org_url, '', 'organizations')
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to send request to DCNM.")
        return None
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
    return None
def get_network(self, org, segid):
    """Return given network from DCNM.

    :param org: name of organization.
    :param segid: segmentation id of the network.
    :returns: decoded JSON body on success, otherwise None.
    """
    query = {
        'organizationName': org,
        'partitionName': self._part_name,
        'segmentId': segid,
    }
    resp = self._get_network(query)
    if resp and resp.status_code in self._resp_ok:
        return resp.json()
    return None
def get_version(self):
    """Get the DCNM version.

    :returns: the 'Dcnm-Version' value from DCNM, or None when the
        response was not OK.
    :raises SystemExit: when the request to DCNM fails entirely.
    """
    url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)
    try:
        res = self._send_request('GET', url, {}, 'dcnm-version')
    except dexc.DfaClientRequestFailed as exc:
        LOG.error("Failed to get DCNM version.")
        # sys.exit takes a single argument; the original passed two,
        # which printed a tuple instead of a formatted message.
        sys.exit("ERROR: Failed to connect to DCNM: %s" % exc)
    if res and res.status_code in self._resp_ok:
        return res.json().get('Dcnm-Version')
    return None
def fill_urls(self):
"""This assigns the URL's based on the protocol. """
protocol = self.dcnm_protocol
self._org_url = '%s://%s/rest/auto-config/organizations' % (
(protocol, self._ip))
self._create_network_url = ('%s://%s/' % (protocol, self._ip) +
'rest/auto-config/organizations'
'/%s/partitions/%s/networks')
self.host_protocol_url = '%s://%s/' % (protocol, self._ip)
self._create_network_url = self._build_url(
'rest/auto-config/organizations'
'/%s/partitions/%s/networks')
self._cfg_profile_list_url = '%s://%s/rest/auto-config/profiles' % (
(protocol, self._ip))
self._cfg_profile_get_url = self._cfg_profile_list_url + '/%s'
self._global_settings_url = self._build_url(
'rest/auto-config/settings')
self._create_part_url = self._build_url(
'rest/auto-config/organizations/%s/partitions')
self._update_part_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/%s')
self._del_org_url = self._build_url(
'rest/auto-config/organizations/%s')
self._del_part = self._build_url(
'rest/auto-config/organizations/%s/partitions/%s')
self._network_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/'
'%s/networks/segment/%s')
self._network_mob_url = self._build_url(
'rest/auto-config/organizations/%s/partitions/'
'%s/networks/vlan/%s/mobility-domain/%s')
self._segmentid_ranges_url = self._build_url(
'rest/settings/segmentid-ranges')
self._login_url = self._build_url('rest/logon')
self._logout_url = self._build_url('rest/logout') | This assigns the URL's based on the protocol. | entailment |
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
attr.PLURALS.update(plural_mappings)
action_map = {DEVICE: {'get_hosting_device_config': 'GET'}}
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.DEVICE_MANAGER,
action_map=action_map) | Returns Ext Resources. | entailment |
def get_inspector():
"""Reuse inspector"""
global _INSPECTOR
if _INSPECTOR:
return _INSPECTOR
else:
bind = op.get_bind()
_INSPECTOR = sa.engine.reflection.Inspector.from_engine(bind)
return _INSPECTOR | Reuse inspector | entailment |
def get_data():
"""Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists.
"""
output = []
tables = get_tables()
for table in tables:
columns = get_columns(table)
for column in columns:
if column['name'] == 'tenant_id':
output.append((table, column))
return output | Returns combined list of tuples: [(table, column)].
List is built, based on retrieved tables, where column with name
``tenant_id`` exists. | entailment |
def contract_creation_exceptions():
"""Special migration for the blueprint to support Keystone V3.
We drop all tenant_id columns and create project_id columns instead.
"""
return {
sa.Column: ['.'.join([table, 'project_id']) for table in get_tables()],
sa.Index: get_tables()
} | Special migration for the blueprint to support Keystone V3.
We drop all tenant_id columns and create project_id columns instead. | entailment |
def _agent_notification_bulk(self, context, method, routers,
hosting_device, operation):
"""Notify the Cisco cfg agent handling a particular hosting_device.
A single notification can contain multiple routers.
"""
admin_context = context.is_admin and context or context.elevated()
dmplugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
if (hosting_device is not None and extensions.is_extension_supported(
dmplugin, CFGAGENT_SCHED)):
agents = dmplugin.get_cfg_agents_for_hosting_devices(
admin_context, [hosting_device['id']], admin_state_up=True,
schedule=True)
if agents:
agent = agents[0]
LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
'message %(method)s [BULK]',
{'agent_type': agent.agent_type,
'topic': CFG_AGENT_L3_ROUTING,
'host': agent.host,
'method': method})
cctxt = self.client.prepare(server=agent.host,
version='1.1')
cctxt.cast(context, method, routers=routers) | Notify the Cisco cfg agent handling a particular hosting_device.
A single notification can contain multiple routers. | entailment |
def _notification(self, context, method, routers, operation,
shuffle_agents):
"""Notify all or individual Cisco cfg agents."""
if extensions.is_extension_supported(self._l3plugin, L3AGENT_SCHED):
adm_context = (context.is_admin and context or context.elevated())
# This is where hosting device gets scheduled to Cisco cfg agent
self._l3plugin.schedule_routers(adm_context, routers)
self._agent_notification(
context, method, routers, operation, shuffle_agents)
else:
cctxt = self.client.prepare(topics=topics.L3_AGENT, fanout=True)
cctxt.cast(context, method, routers=[r['id'] for r in routers]) | Notify all or individual Cisco cfg agents. | entailment |
def routers_updated(self, context, routers, operation=None, data=None,
shuffle_agents=False):
"""Notify cfg agents about configuration changes to routers.
This includes operations performed on the router like when a
router interface is added or removed.
"""
if routers:
self._notification(context, 'routers_updated', routers, operation,
shuffle_agents) | Notify cfg agents about configuration changes to routers.
This includes operations performed on the router like when a
router interface is added or removed. | entailment |
def router_removed_from_hosting_device(self, context, router):
"""Notify cfg agent about router removed from hosting device."""
self._notification(context, 'router_removed_from_hosting_device',
[router], operation=None, shuffle_agents=False) | Notify cfg agent about router removed from hosting device. | entailment |
def router_added_to_hosting_device(self, context, router):
"""Notify cfg agent about router added to hosting device."""
self._notification(context, 'router_added_to_hosting_device',
[router], operation=None, shuffle_agents=False) | Notify cfg agent about router added to hosting device. | entailment |
def routers_removed_from_hosting_device(self, context, router_ids,
hosting_device):
"""Notify cfg agent that routers have been removed from hosting device.
@param: context - information about tenant, user etc
@param: router-ids - list of ids
@param: hosting_device - device hosting the routers
"""
self._agent_notification_bulk(
context, 'router_removed_from_hosting_device', router_ids,
hosting_device, operation=None) | Notify cfg agent that routers have been removed from hosting device.
@param: context - information about tenant, user etc
@param: router-ids - list of ids
@param: hosting_device - device hosting the routers | entailment |
def _cb_dcnm_msg(self, method, body):
"""Callback function to process DCNM network creation/update/deletion
message received by AMQP.
It also communicates with DCNM to extract info for CPNR record
insertion/deletion.
:param pika.channel.Channel ch: The channel instance.
:param pika.Spec.Basic.Deliver method: The basic deliver method
which includes routing key.
:param pika.Spec.BasicProperties properties: properties
:param str body: The message body.
"""
LOG.debug('Routing_key: %(key)s, body: %(body)s.',
{'key': method.routing_key, 'body': body})
partition_keyword = 'auto-config.organization.partition'
network_keyword = partition_keyword + '.network'
network_create_key = network_keyword + '.create'
network_update_key = network_keyword + '.update'
msg = jsonutils.loads(body)
LOG.debug('_cb_dcnm_msg: RX message: %s', msg)
if not msg:
LOG.debug("error, return")
return
url = msg['link']
url_fields = url.split('/')
pre_project_name = url_fields[4]
pre_partition_name = url_fields[6]
pre_seg_id = url_fields[9]
data = {"project_name": pre_project_name,
"partition_name": pre_partition_name,
"segmentation_id": pre_seg_id}
if network_create_key in method.routing_key or (
network_update_key in method.routing_key):
pri = self._create_pri
event_type = 'dcnm.network.create'
else:
pri = self._delete_pri
event_type = 'dcnm.network.delete'
if self._pq is not None:
payload = (event_type, data)
self._pq.put((pri, time.ctime, payload)) | Callback function to process DCNM network creation/update/deletion
message received by AMQP.
It also communicates with DCNM to extract info for CPNR record
insertion/deletion.
:param pika.channel.Channel ch: The channel instance.
:param pika.Spec.Basic.Deliver method: The basic deliver method
which includes routing key.
:param pika.Spec.BasicProperties properties: properties
:param str body: The message body. | entailment |
def process_amqp_msgs(self):
"""Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue.
"""
LOG.info('Starting process_amqp_msgs...')
while True:
(mtd_fr, hdr_fr, body) = (None, None, None)
try:
if self.consume_channel:
(mtd_fr, hdr_fr, body) = self.consume_channel.basic_get(
self._dcnm_queue_name)
if mtd_fr:
# Queue has messages.
LOG.info('RX message: %s', body)
self._cb_dcnm_msg(mtd_fr, body)
self.consume_channel.basic_ack(mtd_fr.delivery_tag)
else:
# Queue is empty.
try:
self._conn.sleep(1)
except AttributeError:
time.sleep(1)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_str = traceback.format_exception(exc_type,
exc_value, exc_tb)
LOG.exception("Failed to read from queue: %(queue)s "
"%(exc_type)s, %(exc_value)s, %(exc_tb)s.", {
'queue': self._dcnm_queue_name,
'exc_type': exc_type,
'exc_value': exc_value,
'exc_tb': tb_str}) | Process AMQP queue messages.
It connects to AMQP server and calls callbacks to process DCNM events,
i.e. routing key containing '.cisco.dcnm.', once they arrive in the
queue. | entailment |
def _is_pingable(ip):
"""Checks whether an IP address is reachable by pinging.
Use linux utils to execute the ping (ICMP ECHO) command.
Sends 5 packets with an interval of 0.2 seconds and timeout of 1
seconds. Runtime error implies unreachability else IP is pingable.
:param ip: IP to check
:return: bool - True or False depending on pingability.
"""
ping_cmd = ['ping',
'-c', '5',
'-W', '1',
'-i', '0.2',
ip]
try:
linux_utils.execute(ping_cmd, check_exit_code=True)
return True
except RuntimeError:
LOG.warning("Cannot ping ip address: %s", ip)
return False | Checks whether an IP address is reachable by pinging.
Use linux utils to execute the ping (ICMP ECHO) command.
Sends 5 packets with an interval of 0.2 seconds and timeout of 1
seconds. Runtime error implies unreachability else IP is pingable.
:param ip: IP to check
:return: bool - True or False depending on pingability. | entailment |
def _can_connect(ip, port):
"""Checks if a TCP port at IP address is possible to connect to"""
cs = socket.socket()
try:
cs.connect((ip, port))
cs.close()
return True
except socket.error:
return False | Checks if a TCP port at IP address is possible to connect to | entailment |
def get_dead_hosting_devices_info(self):
"""
Get a list of hosting devices that have been marked dead
:return: List of dead hosting device ids
"""
res = []
for hd_id in self.hosting_devices_backlog:
hd = self.hosting_devices_backlog[hd_id]['hd']
if hd['hd_state'] == cc.HD_DEAD:
res.append(hd['id'])
return res | Get a list of hosting devices that have been marked dead
:return: List of dead hosting device ids | entailment |
def get_monitored_hosting_devices_info(self, hd_state_filter=None):
"""
This function returns a list of all hosting devices monitored
by this agent
"""
wait_time = datetime.timedelta(
seconds=cfg.CONF.cfg_agent.hosting_device_dead_timeout)
resp = []
for hd_id in self.hosting_devices_backlog:
hd = self.hosting_devices_backlog[hd_id]['hd']
display_hd = True
if hd_state_filter is not None:
if hd['hd_state'] == hd_state_filter:
display_hd = True
else:
display_hd = False
if display_hd:
created_time = hd['created_at']
boottime = datetime.timedelta(seconds=hd['booting_time'])
backlogged_at = hd['backlog_insertion_ts']
booted_at = created_time + boottime
dead_at = backlogged_at + wait_time
resp.append({'host id': hd['id'],
'hd_state': hd['hd_state'],
'created at': str(created_time),
'backlogged at': str(backlogged_at),
'estimate booted at': str(booted_at),
'considered dead at': str(dead_at)})
else:
continue
return resp | This function returns a list of all hosting devices monitored
by this agent | entailment |
def is_hosting_device_reachable(self, hosting_device):
"""Check the hosting device which hosts this resource is reachable.
If the resource is not reachable, it is added to the backlog.
* heartbeat revision
We want to enqueue all hosting-devices into the backlog for
monitoring purposes
adds key/value pairs to hd (aka hosting_device dictionary)
_is_pingable : if it returns true,
hd['hd_state']='Active'
_is_pingable : if it returns false,
hd['hd_state']='Unknown'
:param hosting_device : dict of the hosting device
:returns: True if device is reachable, else None
"""
ret_val = False
hd = hosting_device
hd_id = hosting_device['id']
hd_mgmt_ip = hosting_device['management_ip_address']
dead_hd_list = self.get_dead_hosting_devices_info()
if hd_id in dead_hd_list:
LOG.debug("Hosting device: %(hd_id)s@%(ip)s is already marked as"
" Dead. It is assigned as non-reachable",
{'hd_id': hd_id, 'ip': hd_mgmt_ip})
return False
# Modifying the 'created_at' to a date time object if it is not
if not isinstance(hd['created_at'], datetime.datetime):
hd['created_at'] = datetime.datetime.strptime(hd['created_at'],
'%Y-%m-%d %H:%M:%S')
if _is_pingable(hd_mgmt_ip):
LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.",
{'hd_id': hd_id, 'ip': hd_mgmt_ip})
hd['hd_state'] = cc.HD_ACTIVE
ret_val = True
else:
LOG.debug("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.",
{'hd_id': hd_id, 'ip': hd_mgmt_ip})
hd['hd_state'] = cc.HD_NOT_RESPONDING
ret_val = False
if self.enable_heartbeat is True or ret_val is False:
self.backlog_hosting_device(hd)
return ret_val | Check the hosting device which hosts this resource is reachable.
If the resource is not reachable, it is added to the backlog.
* heartbeat revision
We want to enqueue all hosting-devices into the backlog for
monitoring purposes
adds key/value pairs to hd (aka hosting_device dictionary)
_is_pingable : if it returns true,
hd['hd_state']='Active'
_is_pingable : if it returns false,
hd['hd_state']='Unknown'
:param hosting_device : dict of the hosting device
:returns: True if device is reachable, else None | entailment |
def check_backlogged_hosting_devices(self, driver_mgr):
"""Checks the status of backlogged hosting devices.
Skips newly spun up instances during their booting time as specified
in the boot time parameter.
Each hosting-device tracked has a key, hd_state, that represents the
last known state for the hosting device. Valid values for hd_state
are ['Active', 'Unknown', 'Dead']
Each time check_backlogged_hosting_devices is invoked, a ping-test
is performed to determine the current state. If the current state
differs, hd_state is updated.
The hd_state transitions/actions are represented by the following
table.
+------------+---------------------+----------------+----------------+
| current / | Active | Unknown | Dead |
| last state | | | |
+============+=====================+================+================+
| Active | Device is reachable.| Device was | Dead device |
| | No state change | temporarily | recovered. |
| | | unreachable. | Trigger resync |
+------------+---------------------+----------------+----------------+
| Unknown | Device connectivity | Device | Not a valid |
| | test failed. Set | connectivity | state |
| | backlog timestamp | test failed. | transition. |
| | and wait for dead | Dead timeout | |
| | timeout to occur. | has not | |
| | | occurred yet. | |
+------------+---------------------+----------------+----------------+
| Dead | Not a valid state | Dead timeout | Device is |
| | transition. | for device has | still dead. |
| | | elapsed. | No state |
| | | Notify plugin | change. |
+------------+---------------------+----------------+----------------+
:returns: A dict of the format:
::
{"reachable": [<hd_id>,..],
"dead": [<hd_id>,..],
"revived": [<hd_id>,..]}
* reachable - a list of hosting devices that are now reachable
* dead - a list of hosting devices deemed dead
* revived - a list of hosting devices (dead to active)
"""
response_dict = {'reachable': [], 'revived': [], 'dead': []}
LOG.debug("Current Backlogged hosting devices: \n%s\n",
self.hosting_devices_backlog.keys())
for hd_id in self.hosting_devices_backlog.keys():
hd = self.hosting_devices_backlog[hd_id]['hd']
if not timeutils.is_older_than(hd['created_at'],
hd['booting_time']):
LOG.info("Hosting device: %(hd_id)s @ %(ip)s hasn't "
"passed minimum boot time. Skipping it. ",
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
continue
LOG.info("Checking hosting device: %(hd_id)s @ %(ip)s for "
"reachability.", {'hd_id': hd_id,
'ip': hd['management_ip_address']})
hd_state = hd['hd_state']
if _is_pingable(hd['management_ip_address']):
if hd_state == cc.HD_NOT_RESPONDING:
LOG.debug("hosting devices revived & reachable, %s" %
(pprint.pformat(hd)))
hd['hd_state'] = cc.HD_ACTIVE
# hosting device state
response_dict['reachable'].append(hd_id)
elif hd_state == cc.HD_DEAD:
# test if management port is available
if _can_connect(hd['management_ip_address'],
hd['protocol_port']) is True:
LOG.debug("Dead hosting devices revived %s" %
(pprint.pformat(hd)))
hd['hd_state'] = cc.HD_ACTIVE
response_dict['revived'].append(hd_id)
else:
LOG.debug("Cannot connect to management port %(p)d on "
"hosting device with ip %(ip)s",
{'p': hd['protocol_port'],
'ip': hd['management_ip_address']})
else:
LOG.debug("No-op."
"_is_pingable is True and current"
" hd['hd_state']=%s" % hd_state)
LOG.info("Hosting device: %(hd_id)s @ %(ip)s is now "
"reachable. Adding it to response",
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
else:
LOG.info("Hosting device: %(hd_id)s %(hd_state)s"
" @ %(ip)s not reachable ",
{'hd_id': hd_id,
'hd_state': hd['hd_state'],
'ip': hd['management_ip_address']})
if hd_state == cc.HD_ACTIVE:
LOG.debug("hosting device lost connectivity, %s" %
(pprint.pformat(hd)))
hd['backlog_insertion_ts'] = timeutils.utcnow()
hd['hd_state'] = cc.HD_NOT_RESPONDING
elif hd_state == cc.HD_NOT_RESPONDING:
if timeutils.is_older_than(
hd['backlog_insertion_ts'],
cfg.CONF.cfg_agent.hosting_device_dead_timeout):
# current hd_state is now dead, previous state: Unknown
hd['hd_state'] = cc.HD_DEAD
LOG.debug("Hosting device: %(hd_id)s @ %(ip)s hasn't "
"been reachable for the "
"last %(time)d seconds. "
"Marking it dead.",
{'hd_id': hd_id,
'ip': hd['management_ip_address'],
'time': cfg.CONF.cfg_agent.
hosting_device_dead_timeout})
response_dict['dead'].append(hd_id)
LOG.debug("Response: %s", response_dict)
return response_dict | Checks the status of backlogged hosting devices.
Skips newly spun up instances during their booting time as specified
in the boot time parameter.
Each hosting-device tracked has a key, hd_state, that represents the
last known state for the hosting device. Valid values for hd_state
are ['Active', 'Unknown', 'Dead']
Each time check_backlogged_hosting_devices is invoked, a ping-test
is performed to determine the current state. If the current state
differs, hd_state is updated.
The hd_state transitions/actions are represented by the following
table.
+------------+---------------------+----------------+----------------+
| current / | Active | Unknown | Dead |
| last state | | | |
+============+=====================+================+================+
| Active | Device is reachable.| Device was | Dead device |
| | No state change | temporarily | recovered. |
| | | unreachable. | Trigger resync |
+------------+---------------------+----------------+----------------+
| Unknown | Device connectivity | Device | Not a valid |
| | test failed. Set | connectivity | state |
| | backlog timestamp | test failed. | transition. |
| | and wait for dead | Dead timeout | |
| | timeout to occur. | has not | |
| | | occurred yet. | |
+------------+---------------------+----------------+----------------+
| Dead | Not a valid state | Dead timeout | Device is |
| | transition. | for device has | still dead. |
| | | elapsed. | No state |
| | | Notify plugin | change. |
+------------+---------------------+----------------+----------------+
:returns: A dict of the format:
::
{"reachable": [<hd_id>,..],
"dead": [<hd_id>,..],
"revived": [<hd_id>,..]}
* reachable - a list of hosting devices that are now reachable
* dead - a list of hosting devices deemed dead
* revived - a list of hosting devices (dead to active) | entailment |
def create_floatingip(self, context, floatingip):
"""Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent and l3 cfg agent, the initial status for the
floating IP object will be DOWN.
"""
return super(CiscoRouterPlugin, self).create_floatingip(
context, floatingip,
initial_status=bc.constants.FLOATINGIP_STATUS_DOWN) | Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent and l3 cfg agent, the initial status for the
floating IP object will be DOWN. | entailment |
def attach_intf_router(self, tenant_id, tenant_name, router_id):
"""Routine to attach the interface to the router. """
in_sub = self.get_in_subnet_id(tenant_id)
out_sub = self.get_out_subnet_id(tenant_id)
# Modify Hard coded Name fixme
subnet_lst = set()
subnet_lst.add(in_sub)
subnet_lst.add(out_sub)
ret = self.os_helper.add_intf_router(router_id, tenant_id, subnet_lst)
return ret, in_sub, out_sub | Routine to attach the interface to the router. | entailment |
def get_router_id(self, tenant_id, tenant_name):
"""Retrieve the router ID. """
router_id = None
if tenant_id in self.tenant_dict:
router_id = self.tenant_dict.get(tenant_id).get('router_id')
if not router_id:
router_list = self.os_helper.get_rtr_by_name(
'FW_RTR_' + tenant_name)
if len(router_list) > 0:
router_id = router_list[0].get('id')
return router_id | Retrieve the router ID. | entailment |
def delete_intf_router(self, tenant_id, tenant_name, router_id):
"""Routine to delete the router. """
in_sub = self.get_in_subnet_id(tenant_id)
out_sub = self.get_out_subnet_id(tenant_id)
subnet_lst = set()
subnet_lst.add(in_sub)
subnet_lst.add(out_sub)
router_id = self.get_router_id(tenant_id, tenant_name)
if router_id:
ret = self.os_helper.delete_intf_router(tenant_name, tenant_id,
router_id, subnet_lst)
if not ret:
LOG.error("Failed to delete router intf id %(rtr)s, "
"tenant %(tenant)s",
{'rtr': router_id, 'tenant': tenant_id})
return ret
LOG.error("Invalid router ID, can't delete interface from "
"router") | Routine to delete the router. | entailment |
def prepare_router_vm_msg(self, tenant_id, tenant_name, router_id, net_id,
subnet_id, seg, status):
"""Prepare the message to be sent to Event queue for VDP trigger.
This is actually called for a subnet add to a router. This function
prepares a VM's VNIC create/delete message.
"""
max_get_router_info_retry = True
attempt = 0
while max_get_router_info_retry:
port_data = self.os_helper.get_router_port_subnet(subnet_id)
if port_data is None:
LOG.error("Unable to get router port data")
return None
if port_data.get('binding:host_id') == '':
time.sleep(3)
attempt += 1
if attempt > 3:
max_get_router_info_retry = False
LOG.error("Unable to get router binding host data, "
"Max attempts reached")
else:
max_get_router_info_retry = False
if status is 'up':
event_type = 'service.vnic.create'
else:
event_type = 'service.vnic.delete'
vnic_data = {'status': status, 'mac': port_data.get('mac_address'),
'segid': seg, 'host': port_data.get('binding:host_id')}
if vnic_data['host'] == '':
LOG.error("Null host for seg %(seg)s subnet %(subnet)s",
{'seg': seg, 'subnet': subnet_id})
if self.tenant_dict.get(tenant_id).get('host') is None:
LOG.error("Null host for tenant %(tenant)s seg %(seg)s "
"subnet %(subnet)s",
{'tenant': tenant_id, 'seg': seg,
'subnet': subnet_id})
return None
else:
vnic_data['host'] = self.tenant_dict.get(tenant_id).get('host')
else:
self.tenant_dict[tenant_id]['host'] = vnic_data['host']
vm_ip = port_data.get('fixed_ips')[0].get('ip_address')
vnic_data.update({'port_id': port_data.get('id'), 'network_id': net_id,
'vm_name': 'FW_SRVC_RTR_' + tenant_name,
'vm_ip': vm_ip, 'vm_uuid': router_id, 'gw_mac': None,
'fwd_mod': 'anycast_gateway'})
payload = {'service': vnic_data}
data = (event_type, payload)
return data | Prepare the message to be sent to Event queue for VDP trigger.
This is actually called for a subnet add to a router. This function
prepares a VM's VNIC create/delete message. | entailment |
def send_router_port_msg(self, tenant_id, tenant_name, router_id, net_id,
subnet_id, seg, status):
"""Sends the router port message to the queue. """
data = self.prepare_router_vm_msg(tenant_id, tenant_name, router_id,
net_id, subnet_id, seg, status)
if data is None:
return False
timestamp = time.ctime()
pri = Q_PRIORITY
LOG.info("Sending native FW data into queue %(data)s",
{'data': data})
self.que_obj.put((pri, timestamp, data))
return True | Sends the router port message to the queue. | entailment |
def update_dcnm_partition_static_route(self, tenant_id, arg_dict):
"""Add static route in DCNM's partition.
This gets pushed to the relevant leaf switches.
"""
ip_list = self.os_helper.get_subnet_nwk_excl(tenant_id,
arg_dict.get('excl_list'))
srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
ret = self.dcnm_obj.update_partition_static_route(
arg_dict.get('tenant_name'), fw_const.SERV_PART_NAME, ip_list,
vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
service_node_ip=srvc_node_ip)
if not ret:
LOG.error("Unable to update DCNM ext profile with static "
"route %s", arg_dict.get('router_id'))
self.delete_intf_router(tenant_id, arg_dict.get('tenant_name'),
arg_dict.get('router_id'))
return False
return True | Add static route in DCNM's partition.
This gets pushed to the relevant leaf switches. | entailment |
def _create_arg_dict(self, tenant_id, data, in_sub, out_sub):
"""Create the argument dictionary. """
in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
in_ip_dict = self.get_in_ip_addr(tenant_id)
out_ip_dict = self.get_out_ip_addr(tenant_id)
excl_list = [in_ip_dict.get('subnet'), out_ip_dict.get('subnet')]
arg_dict = {'tenant_id': tenant_id,
'tenant_name': data.get('tenant_name'),
'in_seg': in_seg, 'in_vlan': in_vlan,
'out_seg': out_seg, 'out_vlan': out_vlan,
'router_id': data.get('router_id'),
'in_sub': in_sub, 'out_sub': out_sub,
'in_gw': in_ip_dict.get('gateway'),
'out_gw': out_ip_dict.get('gateway'),
'excl_list': excl_list}
return arg_dict | Create the argument dictionary. | entailment |
def _create_fw(self, tenant_id, data):
"""Internal routine that gets called when a FW is created. """
LOG.debug("In creating Native FW data is %s", data)
# TODO(padkrish):
# Check if router is already added and only then add, needed for
# restart cases since native doesn't have a special DB
ret, in_sub, out_sub = self.attach_intf_router(tenant_id,
data.get('tenant_name'),
data.get('router_id'))
if not ret:
LOG.error("Native FW: Attach intf router failed for tenant "
"%s", tenant_id)
return False
self.create_tenant_dict(tenant_id, data.get('router_id'))
arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub)
# Program DCNM to update profile's static IP address on OUT part
ret = self.update_dcnm_partition_static_route(tenant_id, arg_dict)
if not ret:
return False
# Program the default GW in router namespace
ret = self.program_default_gw(tenant_id, arg_dict)
if not ret:
return False
# Program router namespace to have all tenant networks to be routed
# to IN service network
ret = self.program_next_hop(tenant_id, arg_dict)
if not ret:
return False
# Send message for router port auto config for in service nwk
ret = self.send_in_router_port_msg(tenant_id, arg_dict, 'up')
if not ret:
return False
# Send message for router port auto config for out service nwk
return self.send_out_router_port_msg(tenant_id, arg_dict, 'up') | Internal routine that gets called when a FW is created. | entailment |
def create_fw(self, tenant_id, data):
"""Top level routine called when a FW is created. """
try:
return self._create_fw(tenant_id, data)
except Exception as exc:
LOG.error("Failed to create FW for device native, tenant "
"%(tenant)s data %(data)s Exc %(exc)s",
{'tenant': tenant_id, 'data': data, 'exc': exc})
return False | Top level routine called when a FW is created. | entailment |
def _delete_fw(self, tenant_id, data):
"""Internal routine called when a FW is deleted. """
LOG.debug("In Delete fw data is %s", data)
in_sub = self.get_in_subnet_id(tenant_id)
out_sub = self.get_out_subnet_id(tenant_id)
arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub)
if arg_dict.get('router_id') is None:
LOG.error("Router ID unknown for tenant %s", tenant_id)
return False
if tenant_id not in self.tenant_dict:
self.create_tenant_dict(tenant_id, arg_dict.get('router_id'))
ret = self.send_in_router_port_msg(tenant_id, arg_dict, 'down')
if not ret:
return False
ret = self.send_out_router_port_msg(tenant_id, arg_dict, 'down')
if not ret:
return False
# Usually sending message to queue doesn't fail!!!
router_ret = self.delete_intf_router(tenant_id,
arg_dict.get('tenant_name'),
arg_dict.get('router_id'))
if not router_ret:
LOG.error("Unable to delete router for tenant %s, error case",
tenant_id)
return router_ret
del self.tenant_dict[tenant_id]
return router_ret | Internal routine called when a FW is deleted. | entailment |
def delete_fw(self, tenant_id, data):
"""Top level routine called when a FW is deleted. """
try:
ret = self._delete_fw(tenant_id, data)
return ret
except Exception as exc:
LOG.error("Failed to delete FW for device native, tenant "
"%(tenant)s data %(data)s Exc %(exc)s",
{'tenant': tenant_id, 'data': data, 'exc': exc})
return False | Top level routine called when a FW is deleted. | entailment |
def _program_dcnm_static_route(self, tenant_id, tenant_name):
"""Program DCNM Static Route. """
in_ip_dict = self.get_in_ip_addr(tenant_id)
in_gw = in_ip_dict.get('gateway')
in_ip = in_ip_dict.get('subnet')
if in_gw is None:
LOG.error("No FW service GW present")
return False
out_ip_dict = self.get_out_ip_addr(tenant_id)
out_ip = out_ip_dict.get('subnet')
# Program DCNM to update profile's static IP address on OUT part
excl_list = []
excl_list.append(in_ip)
excl_list.append(out_ip)
subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list,
excl_part=True)
# This count is for telling DCNM to insert the static route in a
# particular position. Total networks created - exclusive list as
# above - the network that just got created.
srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id)
ret = self.dcnm_obj.update_partition_static_route(
tenant_name, fw_const.SERV_PART_NAME, subnet_lst,
vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile,
service_node_ip=srvc_node_ip)
if not ret:
LOG.error("Unable to update DCNM ext profile with static "
"route")
return False
return True | Program DCNM Static Route. | entailment |
def network_create_notif(self, tenant_id, tenant_name, cidr):
"""Tenant Network create Notification.
Restart is not supported currently for this. fixme(padkrish).
"""
router_id = self.get_router_id(tenant_id, tenant_name)
if not router_id:
LOG.error("Rout ID not present for tenant")
return False
ret = self._program_dcnm_static_route(tenant_id, tenant_name)
if not ret:
LOG.error("Program DCNM with static routes failed "
"for router %s", router_id)
return False
# Program router namespace to have this network to be routed
# to IN service network
in_ip_dict = self.get_in_ip_addr(tenant_id)
in_gw = in_ip_dict.get('gateway')
if in_gw is None:
LOG.error("No FW service GW present")
return False
ret = self.os_helper.program_rtr_nwk_next_hop(router_id, in_gw, cidr)
if not ret:
LOG.error("Unable to program default router next hop %s",
router_id)
return False
return True | Tenant Network create Notification.
Restart is not supported currently for this. fixme(padkrish). | entailment |
def network_delete_notif(self, tenant_id, tenant_name, network_id):
"""Tenant Network delete Notification.
Restart is not supported currently for this. fixme(padkrish).
"""
router_id = self.get_router_id(tenant_id, tenant_name)
if router_id is None:
LOG.error("Rout ID not present for tenant")
return False
ret = self._program_dcnm_static_route(tenant_id, tenant_name)
if not ret:
LOG.error("Program DCNM with static routes failed for "
"router %s", router_id)
return False
# Program router namespace to have this network to be routed
# to IN service network
in_ip_dict = self.get_in_ip_addr(tenant_id)
in_gw = in_ip_dict.get('gateway')
in_ip = in_ip_dict.get('subnet')
if in_gw is None:
LOG.error("No FW service GW present")
return False
out_ip_dict = self.get_out_ip_addr(tenant_id)
out_ip = out_ip_dict.get('subnet')
excl_list = []
excl_list.append(in_ip)
excl_list.append(out_ip)
subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list,
excl_part=True)
ret = self.os_helper.remove_rtr_nwk_next_hop(router_id, in_gw,
subnet_lst, excl_list)
if not ret:
LOG.error("Unable to program default router next hop %s",
router_id)
return False
return True | Tenant Network delete Notification.
Restart is not supported currently for this. fixme(padkrish). | entailment |
def create_hosting_device_resources(self, context, complementary_id,
tenant_id, mgmt_context, max_hosted):
"""Create resources for a hosting device in a plugin specific way."""
mgmt_port = None
if mgmt_context and mgmt_context.get('mgmt_nw_id') and tenant_id:
# Create port for mgmt interface
p_spec = {'port': {
'tenant_id': tenant_id,
'admin_state_up': True,
'name': 'mgmt',
'network_id': mgmt_context['mgmt_nw_id'],
'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
'fixed_ips': self._mgmt_subnet_spec(context, mgmt_context),
'device_id': "",
# Use device_owner attribute to ensure we can query for these
# ports even before Nova has set device_id attribute.
'device_owner': complementary_id}}
try:
mgmt_port = self._core_plugin.create_port(context, p_spec)
except n_exc.NeutronException as e:
LOG.error('Error %s when creating management port. '
'Cleaning up.', e)
self.delete_hosting_device_resources(
context, tenant_id, mgmt_port)
mgmt_port = None
# We are setting the 'ports' to an empty list as it is expected by
# the callee: device_handling_db._create_svc_vm_hosting_devices()
return {'mgmt_port': mgmt_port, 'ports': []} | Create resources for a hosting device in a plugin specific way. | entailment |
def get_hosting_device_resources(self, context, id, complementary_id,
tenant_id, mgmt_nw_id):
"""Returns information about all resources for a hosting device."""
mgmt_port = None
# Ports for hosting device may not yet have 'device_id' set to
# Nova assigned uuid of VM instance. However, those ports will still
# have 'device_owner' attribute set to complementary_id. Hence, we
# use both attributes in the query to ensure we find all ports.
query = context.session.query(models_v2.Port)
query = query.filter(expr.or_(
models_v2.Port.device_id == id,
models_v2.Port.device_owner == complementary_id))
for port in query:
if port['network_id'] != mgmt_nw_id:
raise Exception
else:
mgmt_port = port
return {'mgmt_port': mgmt_port} | Returns information about all resources for a hosting device. | entailment |
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
**kwargs):
"""Deletes resources for a hosting device in a plugin specific way."""
if mgmt_port is not None:
try:
self._cleanup_hosting_port(context, mgmt_port['id'])
except n_exc.NeutronException as e:
LOG.error("Unable to delete port:%(port)s after %(tries)d"
" attempts due to exception %(exception)s. "
"Skipping it", {'port': mgmt_port['id'],
'tries': DELETION_ATTEMPTS,
'exception': str(e)}) | Deletes resources for a hosting device in a plugin specific way. | entailment |
def setup_logical_port_connectivity(self, context, port_db,
hosting_device_id):
"""Establishes connectivity for a logical port.
This is done by hot plugging the interface(VIF) corresponding to the
port from the VM.
"""
hosting_port = port_db.hosting_info.hosting_port
if hosting_port:
try:
self._dev_mgr.svc_vm_mgr.interface_attach(hosting_device_id,
hosting_port.id)
LOG.debug("Setup logical port completed for port:%s",
port_db.id)
except nova_exc.Conflict as e:
# VM is still in vm_state building
LOG.debug("Failed to attach interface - spawn thread "
"error %(error)s", {'error': str(e)})
self._gt_pool.spawn_n(self._attach_hosting_port,
hosting_device_id, hosting_port.id)
except Exception as e:
LOG.error("Failed to attach interface mapped to port:"
"%(p_id)s on hosting device:%(hd_id)s due to "
"error %(error)s", {'p_id': hosting_port.id,
'hd_id': hosting_device_id,
'error': str(e)}) | Establishes connectivity for a logical port.
This is done by hot plugging the interface(VIF) corresponding to the
port from the VM. | entailment |
def teardown_logical_port_connectivity(self, context, port_db,
hosting_device_id):
"""Removes connectivity for a logical port.
Unplugs the corresponding data interface from the VM.
"""
if port_db is None or port_db.get('id') is None:
LOG.warning("Port id is None! Cannot remove port "
"from hosting_device:%s", hosting_device_id)
return
hosting_port_id = port_db.hosting_info.hosting_port.id
try:
self._dev_mgr.svc_vm_mgr.interface_detach(hosting_device_id,
hosting_port_id)
self._gt_pool.spawn_n(self._cleanup_hosting_port, context,
hosting_port_id)
LOG.debug("Teardown logicalport completed for port:%s", port_db.id)
except Exception as e:
LOG.error("Failed to detach interface corresponding to port:"
"%(p_id)s on hosting device:%(hd_id)s due to "
"error %(error)s", {'p_id': hosting_port_id,
'hd_id': hosting_device_id,
'error': str(e)}) | Removes connectivity for a logical port.
Unplugs the corresponding data interface from the VM. | entailment |
def allocate_hosting_port(self, context, router_id, port_db, network_type,
hosting_device_id):
"""Allocates a hosting port for a logical port.
We create a hosting port for the router port
"""
l3admin_tenant_id = self._dev_mgr.l3_tenant_id()
hostingport_name = 'hostingport_' + port_db['id'][:8]
p_spec = {'port': {
'tenant_id': l3admin_tenant_id,
'admin_state_up': True,
'name': hostingport_name,
'network_id': port_db['network_id'],
'mac_address': bc.constants.ATTR_NOT_SPECIFIED,
'fixed_ips': [],
'device_id': '',
'device_owner': '',
'port_security_enabled': False}}
try:
hosting_port = self._core_plugin.create_port(context, p_spec)
except n_exc.NeutronException as e:
LOG.error('Error %s when creating hosting port'
'Cleaning up.', e)
self.delete_hosting_device_resources(
context, l3admin_tenant_id, hosting_port)
hosting_port = None
finally:
if hosting_port:
return {'allocated_port_id': hosting_port['id'],
'allocated_vlan': None}
else:
return None | Allocates a hosting port for a logical port.
We create a hosting port for the router port | entailment |
def disable(self, retain_port=False):
"""Teardown DHCP.
Disable DHCP for this network by updating the remote server
and then destroying any local device and namespace.
"""
self.update_server(disabled=True)
if retain_port:
return
self.update_device(disabled=True)
if self.conf.dhcp_delete_namespaces and self.network.namespace:
ns_ip = ip_lib.IPWrapper(self.root_helper,
self.network.namespace)
try:
ns_ip.netns.delete(self.network.namespace)
except RuntimeError:
msg = _('Failed trying to delete namespace: %s')
LOG.exception(msg, self.network.namespace) | Teardown DHCP.
Disable DHCP for this network by updating the remote server
and then destroying any local device and namespace. | entailment |
def recover_devices(cls):
"""Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system.
"""
if "_devices" in globals():
return
global _devices
confs_dir = os.path.abspath(os.path.normpath(cfg.CONF.dhcp_confs))
for netid in os.listdir(confs_dir):
conf_dir = os.path.join(confs_dir, netid)
intf_filename = os.path.join(conf_dir, 'interface')
try:
with open(intf_filename, 'r') as f:
ifname = f.read()
_devices[netid] = ifname
except IOError:
LOG.error('Unable to read interface file: %s',
intf_filename)
LOG.debug("Recovered device %s for network %s'",
ifname, netid) | Track devices.
Creates global dict to track device names across driver invocations
and populates based on current devices configured on the system. | entailment |
def check_version(cls):
"""Checks server version against minimum required version."""
super(SimpleCpnrDriver, cls).check_version()
model.configure_pnr()
cls.recover_networks()
ver = model.get_version()
if ver < cls.MIN_VERSION:
LOG.warning("CPNR version does not meet minimum requirements, "
"expected: %(ever)f, actual: %(rver)f",
{'ever': cls.MIN_VERSION, 'rver': ver})
return ver | Checks server version against minimum required version. | entailment |
def existing_dhcp_networks(cls, conf):
"""Return a list of existing networks ids that we have configs for."""
global _networks
sup = super(SimpleCpnrDriver, cls)
superkeys = sup.existing_dhcp_networks(conf)
return set(_networks.keys()) & set(superkeys) | Return a list of existing networks ids that we have configs for. | entailment |
def _unsafe_update_server(self, disabled=False):
"""Update server with latest network configuration."""
id = self.network.id
net = model.Network.from_neutron(self.network)
if id not in _networks:
if disabled:
return
_networks[id] = net
_networks[id].create()
elif disabled:
_networks[id].delete()
del _networks[id]
else:
_networks[id].update(net)
_networks[id] = net | Update server with latest network configuration. | entailment |
def create_network(self, name, tenant_id, subnet, gw=None):
"""Create the openstack network, including the subnet. """
try:
body = {'network': {'name': name, 'tenant_id': tenant_id,
'admin_state_up': True}}
netw = self.neutronclient.create_network(body=body)
net_dict = netw.get('network')
net_id = net_dict.get('id')
except Exception as exc:
LOG.error("Failed to create network %(name)s, Exc %(exc)s",
{'name': name, 'exc': str(exc)})
return None, None
try:
if gw is None:
body = {'subnet': {'cidr': subnet,
'ip_version': 4,
'network_id': net_id,
'tenant_id': tenant_id,
'enable_dhcp': False}}
else:
body = {'subnet': {'cidr': subnet,
'ip_version': 4,
'network_id': net_id,
'tenant_id': tenant_id,
'enable_dhcp': False,
'gateway_ip': gw}}
subnet_ret = self.neutronclient.create_subnet(body=body)
subnet_dict = subnet_ret.get('subnet')
subnet_id = subnet_dict.get('id')
except Exception as exc:
LOG.error("Failed to create subnet %(sub)s, exc %(exc)s",
{'sub': subnet, 'exc': str(exc)})
try:
self.neutronclient.delete_network(net_id)
except Exception as exc:
LOG.error("Failed to delete network %(net)s, exc %(exc)s",
{'net': net_id, 'exc': str(exc)})
return None, None
return net_id, subnet_id | Create the openstack network, including the subnet. | entailment |
def delete_network(self, name, tenant_id, subnet_id, net_id):
"""Delete the openstack subnet and network. """
try:
self.neutronclient.delete_subnet(subnet_id)
except Exception as exc:
LOG.error("Failed to delete subnet %(sub)s exc %(exc)s",
{'sub': subnet_id, 'exc': str(exc)})
return
try:
self.neutronclient.delete_network(net_id)
except Exception as exc:
LOG.error("Failed to delete network %(name)s exc %(exc)s",
{'name': name, 'exc': str(exc)}) | Delete the openstack subnet and network. | entailment |
def delete_network_all_subnets(self, net_id):
"""Delete the openstack network including all its subnets. """
try:
body = {'network_id': net_id}
subnet_list = self.neutronclient.list_subnets(body=body)
subnet_list = subnet_list.get('subnets')
for subnet in subnet_list:
if subnet.get('network_id') == net_id:
subnet_id = subnet.get('id')
self.neutronclient.delete_subnet(subnet_id)
except Exception as exc:
LOG.error("Failed to delete subnet for net %(net)s "
"Exc %(exc)s", {'net': net_id, 'exc': str(exc)})
return False
try:
self.neutronclient.delete_network(net_id)
except Exception as exc:
LOG.error("Failed to delete network %(net)s Exc %(exc)s",
{'net': net_id, 'exc': str(exc)})
return False
return True | Delete the openstack network including all its subnets. | entailment |
def is_subnet_present(self, subnet_addr):
"""Returns if a subnet is present. """
try:
subnet_list = self.neutronclient.list_subnets(body={})
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if sub.get('cidr') == subnet_addr:
return True
return False
except Exception as exc:
LOG.error("Failed to list subnet %(sub)s, Exc %(exc)s",
{'sub': subnet_addr, 'exc': str(exc)})
return False | Returns if a subnet is present. | entailment |
def get_all_subnets_cidr(self, no_mask=False):
"""Returns all the subnets. """
body = {}
subnet_cidrs = []
try:
subnet_list = self.neutronclient.list_subnets(body=body)
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if no_mask:
subnet_cidrs.append(sub.get('cidr').split('/')[0])
else:
subnet_cidrs.append(sub.get('cidr'))
except Exception as exc:
LOG.error("Failed to list subnet Exc %s", str(exc))
return subnet_cidrs | Returns all the subnets. | entailment |
def get_subnets_for_net(self, net):
"""Returns the subnets in a network. """
try:
subnet_list = self.neutronclient.list_subnets(network_id=net)
subnet_dat = subnet_list.get('subnets')
return subnet_dat
except Exception as exc:
LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s",
{'net': net, 'exc': str(exc)})
return None | Returns the subnets in a network. | entailment |
def get_subnet_cidr(self, subnet_id):
"""retrieve the CIDR associated with a subnet, given its ID. """
try:
subnet_list = self.neutronclient.list_subnets(id=subnet_id)
subnet_dat = subnet_list.get('subnets')[0]
return subnet_dat.get('cidr')
except Exception as exc:
LOG.error("Failed to list subnet for ID %(subnet)s, "
"exc %(exc)s", {'subnet': subnet_id, 'exc': exc})
return None | retrieve the CIDR associated with a subnet, given its ID. | entailment |
def delete_network_subname(self, sub_name):
"""Delete the network by part of its name, use with caution. """
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
for net in net_list:
if net.get('name').find(sub_name) != -1:
self.delete_network_all_subnets(net.get('net_id'))
except Exception as exc:
LOG.error("Failed to get network by subname %(name)s, "
"Exc %(exc)s",
{'name': sub_name, 'exc': str(exc)}) | Delete the network by part of its name, use with caution. | entailment |
def get_network_by_name(self, nwk_name):
"""Search for a openstack network by name. """
ret_net_lst = []
try:
body = {}
net_list = self.neutronclient.list_networks(body=body)
net_list = net_list.get('networks')
for net in net_list:
if net.get('name') == nwk_name:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by name %(name)s, "
"Exc %(exc)s",
{'name': nwk_name, 'exc': str(exc)})
return ret_net_lst | Search for a openstack network by name. | entailment |
def get_network_by_tenant(self, tenant_id):
"""Returns the network of a given tenant. """
ret_net_lst = []
try:
net_list = self.neutronclient.list_networks(body={})
for net in net_list.get('networks'):
if net.get('tenant_id') == tenant_id:
ret_net_lst.append(net)
except Exception as exc:
LOG.error("Failed to get network by tenant %(tenant)s, "
"Exc %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
return ret_net_lst | Returns the network of a given tenant. | entailment |
def get_rtr_by_name(self, rtr_name):
"""Search a router by its name. """
upd_rtr_list = []
try:
rtr_list = self.neutronclient.list_routers()
for rtr in rtr_list.get('routers'):
if rtr_name == rtr['name']:
upd_rtr_list.append(rtr)
except Exception as exc:
LOG.error("Failed to get router by name %(name)s, "
"Exc %(exc)s",
{'name': rtr_name, 'exc': str(exc)})
return upd_rtr_list | Search a router by its name. | entailment |
def create_router(self, name, tenant_id, subnet_lst):
"""Create a openstack router and add the interfaces. """
try:
body = {'router': {'name': name, 'tenant_id': tenant_id,
'admin_state_up': True}}
router = self.neutronclient.create_router(body=body)
rout_dict = router.get('router')
rout_id = rout_dict.get('id')
except Exception as exc:
LOG.error("Failed to create router with name %(name)s"
" Exc %(exc)s", {'name': name, 'exc': str(exc)})
return None
ret = self.add_intf_router(rout_id, tenant_id, subnet_lst)
if not ret:
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s, Exc %(exc)s",
{'name': name, 'exc': str(exc)})
return None
return rout_id | Create a openstack router and add the interfaces. | entailment |
def add_intf_router(self, rout_id, tenant_id, subnet_lst):
"""Add the interfaces to a router. """
try:
for subnet_id in subnet_lst:
body = {'subnet_id': subnet_id}
intf = self.neutronclient.add_interface_router(rout_id,
body=body)
intf.get('port_id')
except Exception as exc:
LOG.error("Failed to create router intf ID %(id)s,"
" Exc %(exc)s", {'id': rout_id, 'exc': str(exc)})
return False
return True | Add the interfaces to a router. | entailment |
def delete_router(self, name, tenant_id, rout_id, subnet_lst):
"""Delete the openstack router.
Delete the router and remove the interfaces attached to it.
"""
ret = self.delete_intf_router(name, tenant_id, rout_id, subnet_lst)
if not ret:
return False
try:
ret = self.neutronclient.delete_router(rout_id)
except Exception as exc:
LOG.error("Failed to delete router %(name)s ret %(ret)s "
"Exc %(exc)s",
{'name': name, 'ret': str(ret), 'exc': str(exc)})
return False
return True | Delete the openstack router.
Delete the router and remove the interfaces attached to it. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.