code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
segment = context.bottom_bound_segment
if segment and self.check_segment(segment):
return segment.get(api.SEGMENTATION_ID) | def _get_vlanid(self, context) | Returns vlan_id associated with a bound VLAN segment. | 5.23627 | 3.979629 | 1.315768 |
segment = context.bottom_bound_segment
if segment and self.check_segment(segment):
return segment.get(api.PHYSICAL_NETWORK) | def _get_physnet(self, context) | Returns physnet associated with a bound VLAN segment. | 5.895156 | 4.154007 | 1.419149 |
vnic_type = context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
profile = context.current.get(bc.portbindings.PROFILE, {})
host_id = self._get_host_id(
context.current.get(bc.portbindings.HOST_ID))
if not host_id:
LOG.warning('Host id from port context is None. '
'Ignoring this port')
return
vlan_id = self._get_vlanid(context)
if not vlan_id:
LOG.warning('Vlan_id is None. Ignoring this port')
return
ucsm_ip = self.driver.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('Host %s is not controlled by any known '
'UCS Manager.', host_id)
return
if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
profile):
# This is a neutron virtio port.
# If VNIC templates are configured, that config would
# take precedence and the VLAN is added to the VNIC template.
physnet = self._get_physnet(context)
if not physnet:
LOG.debug('physnet is None. Not modifying VNIC '
'Template config')
else:
# Check if VNIC template is configured for this physnet
ucsm = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip]
vnic_template = ucsm.vnic_template_list.get(physnet)
if vnic_template:
LOG.debug('vnic_template %s', vnic_template)
self.ucsm_db.add_vnic_template(vlan_id, ucsm_ip,
vnic_template.name, physnet)
return
else:
LOG.debug('VNIC Template not configured for '
'physnet %s', physnet)
# In the absence of VNIC Templates, VLAN is directly added
# to vNIC(s) on the SP Template.
# Check if SP Template config has been provided. If so, find
# the UCSM that controls this host and the Service Profile
# Template for this host.
sp_template_info = (CONF.ml2_cisco_ucsm.ucsms[
ucsm_ip].sp_template_list.get(host_id))
if sp_template_info:
LOG.debug('SP Template: %s, VLAN_id: %d',
sp_template_info.name, vlan_id)
self.ucsm_db.add_service_profile_template(
vlan_id, sp_template_info.name, ucsm_ip)
return
# If this is an Intel SR-IOV vnic, then no need to create port
# profile on the UCS manager. So no need to update the DB.
if not self.driver.is_vmfex_port(profile):
LOG.debug('This is a SR-IOV port and hence not updating DB.')
return
# This is a Cisco VM-FEX port
p_profile_name = self.make_profile_name(vlan_id)
LOG.debug('Port Profile: %s for VLAN_id: %d', p_profile_name, vlan_id)
# Create a new port profile entry in the db
self.ucsm_db.add_port_profile(p_profile_name, vlan_id, ucsm_ip) | def update_port_precommit(self, context) | Adds port profile and vlan information to the DB.
Assign a port profile to this port. To do that:
1. Get the vlan_id associated with the bound segment
2. Check if a port profile already exists for this vlan_id
3. If yes, associate that port profile with this port.
4. If no, create a new port profile with this vlan_id and
associate with this port | 3.432251 | 3.410026 | 1.006518 |
segments = context.network_segments
for segment in segments:
if not self.check_segment(segment):
return # Not a vlan network
vlan_id = segment.get(api.SEGMENTATION_ID)
if not vlan_id:
return # No vlan assigned to segment
# For VM-FEX ports
self.ucsm_db.delete_vlan_entry(vlan_id)
# For Neutron virtio ports
if any([True for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
if ucsm.sp_template_list]):
# At least on UCSM has sp templates configured
self.ucsm_db.delete_sp_template_for_vlan(vlan_id)
if any([True for ip, ucsm in CONF.ml2_cisco_ucsm.ucsms.items()
if ucsm.vnic_template_list]):
# At least one UCSM has vnic templates configured
self.ucsm_db.delete_vnic_template_for_vlan(vlan_id) | def delete_network_precommit(self, context) | Delete entry corresponding to Network's VLAN in the DB. | 3.88118 | 3.757789 | 1.032836 |
segments = context.network_segments
network_name = context.current['name']
for segment in segments:
if not self.check_segment(segment):
return # Not a vlan network
vlan_id = segment.get(api.SEGMENTATION_ID)
if not vlan_id:
return # No vlan assigned to segment
port_profile = self.make_profile_name(vlan_id)
trunk_vlans = (
CONF.sriov_multivlan_trunk.network_vlans.get(network_name, []))
self.driver.delete_all_config_for_vlan(vlan_id, port_profile,
trunk_vlans) | def delete_network_postcommit(self, context) | Delete all configuration added to UCS Manager for the vlan_id. | 4.404006 | 4.013066 | 1.097417 |
vnic_type = context.current.get(bc.portbindings.VNIC_TYPE,
bc.portbindings.VNIC_NORMAL)
LOG.debug('Attempting to bind port %(port)s with vnic_type '
'%(vnic_type)s on network %(network)s ',
{'port': context.current['id'],
'vnic_type': vnic_type,
'network': context.network.current['id']})
profile = context.current.get(bc.portbindings.PROFILE, {})
if not self.driver.check_vnic_type_and_vendor_info(vnic_type,
profile):
return
for segment in context.network.network_segments:
if self.check_segment(segment):
vlan_id = segment[api.SEGMENTATION_ID]
if not vlan_id:
LOG.warning('Cannot bind port: vlan_id is None.')
return
LOG.debug("Port binding to Vlan_id: %s", str(vlan_id))
# Check if this is a Cisco VM-FEX port or Intel SR_IOV port
if self.driver.is_vmfex_port(profile):
profile_name = self.make_profile_name(vlan_id)
self.vif_details[
const.VIF_DETAILS_PROFILEID] = profile_name
else:
self.vif_details[
bc.portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details,
bc.constants.PORT_STATUS_ACTIVE)
return
LOG.error('UCS Mech Driver: Failed binding port ID %(id)s '
'on any segment of network %(network)s',
{'id': context.current['id'],
'network': context.network.current['id']}) | def bind_port(self, context) | Binds port to current network segment.
Binds port only if the vnic_type is direct or macvtap and
the port is from a supported vendor. While binding port set it
in ACTIVE state and provide the Port Profile or Vlan Id as part
vif_details. | 2.99136 | 2.934931 | 1.019227 |
if rule_info.get('status') == 'up':
self.add_rule_entry(rule_info)
if rule_info.get('status') == 'down':
self.remove_rule_entry(rule_info) | def update_rule_entry(self, rule_info) | Update the rule_info list. | 2.490479 | 2.339372 | 1.064593 |
new_rule = IpMacPort(rule_info.get('ip'), rule_info.get('mac'),
rule_info.get('port'))
LOG.debug('Added rule info %s to the list', rule_info)
self.rule_info.append(new_rule) | def add_rule_entry(self, rule_info) | Add host data object to the rule_info list. | 4.36234 | 3.57754 | 1.219369 |
temp_list = list(self.rule_info)
for rule in temp_list:
if (rule.ip == rule_info.get('ip') and
rule.mac == rule_info.get('mac') and
rule.port == rule_info.get('port')):
LOG.debug('Removed rule info %s from the list', rule_info)
self.rule_info.remove(rule) | def remove_rule_entry(self, rule_info) | Remove host data object from rule_info list. | 3.003229 | 2.58162 | 1.163312 |
ipt_cmd = ['iptables', '-t', 'filter', '-S']
cmdo = dsl.execute(ipt_cmd, root_helper=self._root_helper,
log_output=False)
for o in cmdo.split('\n'):
if mac in o.lower():
chain = o.split()[1]
LOG.info('Find %(chain)s for %(mac)s.',
{'chain': chain, 'mac': mac})
return chain | def _find_chain_name(self, mac) | Find a rule associated with a given mac. | 4.174482 | 3.936304 | 1.060508 |
ipt_cmd = ['iptables', '-L', '--line-numbers']
cmdo = dsl.execute(ipt_cmd, self._root_helper, log_output=False)
for o in cmdo.split('\n'):
if mac in o.lower():
rule_no = o.split()[0]
LOG.info('Found rule %(rule)s for %(mac)s.',
{'rule': rule_no, 'mac': mac})
return rule_no | def _find_rule_no(self, mac) | Find rule number associated with a given mac. | 3.889312 | 3.673742 | 1.058679 |
rule_no = self._find_rule_no(mac)
chain = self._find_chain_name(mac)
if not rule_no or not chain:
LOG.error('Failed to update ip rule for %(ip)s %(mac)s',
{'ip': ip, 'mac': mac})
return
update_cmd = ['iptables', '-R', '%s' % chain, '%s' % rule_no,
'-s', '%s/32' % ip, '-m', 'mac', '--mac-source',
'%s' % mac, '-j', 'RETURN']
LOG.debug('Execute command: %s', update_cmd)
dsl.execute(update_cmd, self._root_helper, log_output=False) | def update_ip_rule(self, ip, mac) | Update a rule associated with given ip and mac. | 3.131274 | 3.200952 | 0.978232 |
LOG.debug('Enqueue iptable event %s.', event)
if event.get('status') == 'up':
for rule in self.rule_info:
if (rule.mac == event.get('mac').lower() and
rule.port == event.get('port')):
# Entry already exist in the list.
if rule.ip != event.get('ip'):
LOG.debug('enqueue_event: Only updating IP from %s'
' to %s.' % (rule.ip, event.get('ip')))
# Only update the IP address if it is different.
rule.ip = event.get('ip')
return
self._iptq.put(event) | def enqueue_event(self, event) | Enqueue the given event.
The event contains host data (ip, mac, port) which will be used to
update the spoofing rule for the host in the iptables. | 4.732841 | 4.057868 | 1.166337 |
# Read the iptables
iptables_cmds = ['iptables-save', '-c']
all_rules = dsl.execute(iptables_cmds, root_helper=self._root_helper,
log_output=False)
# For each rule in rule_info update the rule if necessary.
new_rules = []
is_modified = False
for line in all_rules.split('\n'):
new_line = line
line_content = line.split()
# The spoofing rule which includes mac and ip should have
# -s cidr/32 option for ip address. Otherwise no rule
# will be modified.
if '-s' in line_content:
tmp_rule_info = list(self.rule_info)
for rule in tmp_rule_info:
if (rule.mac in line.lower() and
rule.chain.lower() in line.lower() and
not self._is_ip_in_rule(rule.ip, line_content)):
ip_loc = line_content.index('-s') + 1
line_content[ip_loc] = rule.ip + '/32'
new_line = ' '.join(line_content)
LOG.debug('Modified %(old_rule)s. '
'New rule is %(new_rule)s.' % (
{'old_rule': line,
'new_rule': new_line}))
is_modified = True
new_rules.append(new_line)
if is_modified and new_rules:
# Updated all the rules. Now commit the new rules.
iptables_cmds = ['iptables-restore', '-c']
dsl.execute(iptables_cmds, process_input='\n'.join(new_rules),
root_helper=self._root_helper, log_output=False) | def update_iptables(self) | Update iptables based on information in the rule_info. | 3.678742 | 3.526819 | 1.043076 |
while True:
try:
event = self._iptq.get(block=False)
LOG.debug('Dequeue event: %s.', event)
self.update_rule_entry(event)
except queue.Empty:
self.update_iptables()
time.sleep(1)
except Exception:
LOG.exception('ERROR: failed to process queue') | def process_rule_info(self) | Task responsible for processing event queue. | 5.838861 | 4.944797 | 1.180809 |
params = kwargs.get('params')
LOG.info("asa_setup: tenant %(tenant)s %(in_vlan)d %(out_vlan)d"
" %(in_ip)s %(in_mask)s %(out_ip)s %(out_mask)s",
{'tenant': params.get('tenant_name'),
'in_vlan': params.get('in_vlan'),
'out_vlan': params.get('out_vlan'),
'in_ip': params.get('in_ip'),
'in_mask': params.get('in_mask'),
'out_ip': params.get('out_ip'),
'out_mask': params.get('out_mask')})
inside_vlan = str(params.get('in_vlan'))
outside_vlan = str(params.get('out_vlan'))
context = params.get('tenant_name')
cmds = ["conf t", "changeto system"]
inside_int = params.get('intf_in') + '.' + inside_vlan
cmds.append("int " + inside_int)
cmds.append("vlan " + inside_vlan)
outside_int = params.get('intf_out') + '.' + outside_vlan
cmds.append("int " + outside_int)
cmds.append("vlan " + outside_vlan)
cmds.append("context " + context)
cmds.append("allocate-interface " + inside_int)
cmds.append("allocate-interface " + outside_int)
cmds.append("config-url disk0:/" + context + ".cfg")
cmds.append("write memory")
cmds.append("changeto context " + context)
cmds.append("int " + inside_int)
cmds.append("nameif Inside")
cmds.append("security-level 100")
cmds.append(
"ip address " + params.get('in_ip') + " " + params.get('in_mask'))
cmds.append("int " + outside_int)
cmds.append("nameif Outside")
cmds.append("security-level 0")
cmds.append("ip address " + params.get('out_ip') + " " +
params.get('out_mask'))
cmds.append("router ospf 1")
cmds.append("network " + params.get('in_ip') + " " +
params.get('in_mask') + " area 0")
cmds.append("network " + params.get('out_ip') + " " +
params.get('out_mask') + " area 0")
cmds.append("area 0")
cmds.append("route Outside 0.0.0.0 0.0.0.0 " + params.get('out_gw') +
" 1")
cmds.append("route Outside 0.0.0.0 0.0.0.0 " +
params.get('out_sec_gw') + " 1")
cmds.append("end")
cmds.append("write memory")
if context not in self.tenant_rule:
self.tenant_rule[context] = dict()
self.tenant_rule[context]['rule_lst'] = []
data = {"commands": cmds}
return self.rest_send_cli(data) | def setup(self, **kwargs) | setup ASA context for an edge tenant pair. | 2.264769 | 2.194571 | 1.031987 |
params = kwargs.get('params')
LOG.info("asa_cleanup: tenant %(tenant)s %(in_vlan)d %(out_vlan)d"
" %(in_ip)s %(in_mask)s %(out_ip)s %(out_mask)s",
{'tenant': params.get('tenant_name'),
'in_vlan': params.get('in_vlan'),
'out_vlan': params.get('out_vlan'),
'in_ip': params.get('in_ip'),
'in_mask': params.get('in_mask'),
'out_ip': params.get('out_ip'),
'out_mask': params.get('out_mask')})
inside_vlan = str(params.get('in_vlan'))
outside_vlan = str(params.get('out_vlan'))
context = params.get('tenant_name')
cmds = ["conf t", "changeto system"]
cmds.append("no context " + context + " noconfirm")
inside_int = params.get('intf_in') + '.' + inside_vlan
outside_int = params.get('intf_out') + '.' + outside_vlan
cmds.append("no interface " + inside_int)
cmds.append("no interface " + outside_int)
cmds.append("write memory")
cmds.append("del /noconfirm disk0:/" + context + ".cfg")
if context in self.tenant_rule:
for rule in self.tenant_rule[context].get('rule_lst'):
del self.rule_tbl[rule]
del self.tenant_rule[context]
data = {"commands": cmds}
return self.rest_send_cli(data) | def cleanup(self, **kwargs) | cleanup ASA context for an edge tenant pair. | 3.084133 | 2.91243 | 1.058955 |
"Build the acl for IP address. "
if str(network_obj) == '0.0.0.0/0':
acl = "any "
else:
acl = "%(ip)s %(mask)s " % {'ip': network_obj.network,
'mask': network_obj.netmask}
return acl | def build_acl_ip(self, network_obj) | Build the acl for IP address. | 4.507873 | 3.89697 | 1.156764 |
"Build the acl for L4 Ports. "
if port is not None:
if ':' in port:
range = port.replace(':', ' ')
acl = "range %(range)s " % {'range': range}
else:
acl = "eq %(port)s " % {'port': port}
if not enabled:
acl += "inactive"
return acl | def build_acl_port(self, port, enabled=True) | Build the acl for L4 Ports. | 5.568735 | 4.096563 | 1.359367 |
# TODO(padkrish) actions that is not deny or allow, throw error
if rule['action'] == 'allow':
action = 'permit'
else:
action = 'deny'
acl_str = "access-list %(tenant)s extended %(action)s %(prot)s "
acl = acl_str % {'tenant': tenant_name, 'action': action,
'prot': rule.get('protocol')}
src_ip = self.get_ip_address(rule.get('source_ip_address'))
ip_acl = self.build_acl_ip(src_ip)
acl += ip_acl
acl += self.build_acl_port(rule.get('source_port'))
dst_ip = self.get_ip_address(rule.get('destination_ip_address'))
ip_acl = self.build_acl_ip(dst_ip)
acl += ip_acl
acl += self.build_acl_port(rule.get('destination_port'),
enabled=rule.get('enabled'))
return acl | def build_acl(self, tenant_name, rule) | Build the ACL. | 2.607799 | 2.525938 | 1.032408 |
tenant_name = policy['tenant_name']
fw_id = policy['fw_id']
fw_name = policy['fw_name']
LOG.info("asa_apply_policy: tenant=%(tenant)s fw_id=%(fw_id)s "
"fw_name=%(fw_name)s",
{'tenant': tenant_name, 'fw_id': fw_id, 'fw_name': fw_name})
cmds = ["conf t", "changeto context " + tenant_name]
for rule_id, rule in policy['rules'].items():
acl = self.build_acl(tenant_name, rule)
LOG.info("rule[%(rule_id)s]: name=%(name)s enabled=%(enabled)s"
" protocol=%(protocol)s dport=%(dport)s "
"sport=%(sport)s dip=%(dport)s "
"sip=%(sip)s action=%(dip)s",
{'rule_id': rule_id, 'name': rule.get('name'),
'enabled': rule.get('enabled'),
'protocol': rule.get('protocol'),
'dport': rule.get('dst_port'),
'sport': rule.get('src_port'),
'dip': rule.get('destination_ip_address'),
'sip': rule.get('source_ip_address'),
'action': rule.get('action')})
# remove the old ace for this rule
if rule_id in self.rule_tbl:
cmds.append('no ' + self.rule_tbl[rule_id])
self.rule_tbl[rule_id] = acl
if tenant_name in self.tenant_rule:
if rule_id not in self.tenant_rule[tenant_name]['rule_lst']:
self.tenant_rule[tenant_name]['rule_lst'].append(rule_id)
cmds.append(acl)
cmds.append("access-group " + tenant_name + " global")
cmds.append("write memory")
LOG.info("cmds sent is %s", cmds)
data = {"commands": cmds}
return self.rest_send_cli(data) | def apply_policy(self, policy) | Apply a firewall policy. | 2.67874 | 2.61606 | 1.02396 |
integ_flow = self.integ_br_obj.dump_flows_for(
in_port=self.int_peer_port_num)
ext_flow = self.ext_br_obj.dump_flows_for(
in_port=self.phy_peer_port_num)
for net_uuid, lvm in six.iteritems(self.local_vlan_map):
vdp_vlan = lvm.any_consistent_vlan()
flow_required = False
if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
return
if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
"%(lvid)s not present on Integ bridge",
{'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
flow_required = True
if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
"%(lvid)s not present on External bridge",
{'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
flow_required = True
if flow_required:
LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
" %(vdp)s",
{'lvid': lvm.lvid, 'vdp': vdp_vlan})
self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan) | def _flow_check_handler_internal(self) | Periodic handler to check if installed flows are present.
This handler runs periodically to check if installed flows are present.
This function cannot detect and delete the stale flows, if present.
It requires more complexity to delete stale flows. Generally, stale
flows are not present. So, that logic is not put here. | 2.82094 | 2.721045 | 1.036712 |
LOG.info("In _flow_check_handler")
try:
with self.ovs_vdp_lock:
self._flow_check_handler_internal()
except Exception as e:
LOG.error("Exception in _flow_check_handler_internal %s",
str(e)) | def _flow_check_handler(self) | Top level routine to check OVS flow consistency. | 3.937389 | 3.285296 | 1.198488 |
avl_len = constants.MAX_VETH_NAME - len(const_str)
if avl_len <= 0:
LOG.error("veth string name too short")
raise dfae.DfaAgentFailed(reason="Veth Unavailable")
start_pos = len(intf_str) - avl_len
veth_str = const_str + intf_str[start_pos:]
return veth_str | def gen_veth_str(self, const_str, intf_str) | Generate a veth string.
Concatenates the constant string with remaining available length
of interface string from trailing position. | 5.395739 | 4.968063 | 1.086085 |
phy_port_list = self.ext_br_obj.get_port_name_list()
int_port_list = self.integ_br_obj.get_port_name_list()
for port in phy_port_list:
# Use get Interface xxx type
is_patch = ovs_lib.is_patch(self.root_helper, port)
if is_patch:
# Get the peer for this patch
peer_port = ovs_lib.get_peer(self.root_helper, port)
if peer_port in int_port_list:
return port, peer_port
# A solution is needed for veth pairs also, fixme(padkrish)
# ip_wrapper.get_devices() returns all the devices
# Pick the ones whose type is veth (?) and get the other pair
# Combination of "ethtool -S xxx" command and "ip tool" command.
return None, None | def find_interconnect_ports(self) | Find the internal veth or patch ports. | 7.203778 | 6.564077 | 1.097455 |
lldpad_port = self.lldpad_info
if not lldpad_port:
fail_reason = "There is no LLDPad port available."
LOG.error("%s", fail_reason)
return {'result': False, 'fail_reason': fail_reason}
if status == 'up':
if self.vdp_mode == constants.VDP_SEGMENT_MODE:
port_name = self.ext_br_obj.get_ofport_name(port_uuid)
if port_name is None:
fail_reason = "Unknown portname for uuid %s" % (port_uuid)
LOG.error("%s", fail_reason)
return {'result': False, 'fail_reason': fail_reason}
LOG.info("Status up: portname for uuid %(uuid)s is %(port)s",
{'uuid': port_uuid, 'port': port_name})
ret = self.port_up_segment_mode(lldpad_port, port_name,
port_uuid, mac, net_uuid,
segmentation_id, oui)
else:
if self.vdp_mode == constants.VDP_SEGMENT_MODE:
LOG.info("Status down for portname uuid %s", port_uuid)
ret = self.port_down_segment_mode(lldpad_port, port_uuid,
mac, net_uuid,
segmentation_id, oui)
return ret | def send_vdp_port_event_internal(self, port_uuid, mac, net_uuid,
segmentation_id, status, oui) | Send vNIC UP/Down event to VDP.
:param port_uuid: a ovslib.VifPort object.
:mac: MAC address of the VNIC
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down'
:oui: OUI Parameters | 2.577329 | 2.583379 | 0.997658 |
try:
with self.ovs_vdp_lock:
ret = self.send_vdp_port_event_internal(port_uuid, mac,
net_uuid,
segmentation_id,
status, oui)
return ret
except Exception as e:
LOG.error("Exception in send_vdp_port_event %s" % str(e))
return {'result': False, 'fail_reason': str(e)} | def send_vdp_port_event(self, port_uuid, mac, net_uuid,
segmentation_id, status, oui) | Send vNIC UP/Down event to VDP.
:param port: a ovslib.VifPort object.
:param net_uuid: the net_uuid this port is to be associated with.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:param status: Type of port event. 'up' or 'down' | 2.248973 | 2.60591 | 0.863028 |
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
LOG.error("lvm not yet created, get_lvid_vdp_lan "
"return error")
return cconstants.INVALID_VLAN, cconstants.INVALID_VLAN
vdp_vlan = lvm.get_portid_vlan(port_uuid)
lvid = lvm.lvid
LOG.info("Return from lvid_vdp_vlan lvid %(lvid)s vdp_vlan %(vdp)s",
{'lvid': lvid, 'vdp': vdp_vlan})
return lvid, vdp_vlan | def get_lvid_vdp_vlan(self, net_uuid, port_uuid) | Retrieve the Local Vlan ID and VDP Vlan. | 3.056312 | 3.046632 | 1.003177 |
# check validity
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
LOG.error("Cannot unprovision VDP Overlay network for"
" net-id=%(net_uuid)s - Invalid ",
{'net_uuid': net_uuid})
return
LOG.info('unprovision_vdp_overlay_networks: add_flow for '
'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s',
{'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
self.program_vm_ovs_flows(lvid, vdp_vlan, 0) | def unprovision_vdp_overlay_networks(self, net_uuid, lvid, vdp_vlan, oui) | Unprovisions a overlay type network configured using VDP.
:param net_uuid: the uuid of the network associated with this vlan.
:lvid: Local VLAN ID
:vdp_vlan: VDP VLAN ID
:oui: OUI Parameters | 3.777068 | 3.855161 | 0.979743 |
LOG.debug("In VDP VLAN change VLAN %s", vdp_vlan)
if not vsw_cb_data:
LOG.error("NULL vsw_cb_data Info received")
return
net_uuid = vsw_cb_data.get('net_uuid')
port_uuid = vsw_cb_data.get('port_uuid')
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
LOG.error("Network %s is not in the local vlan map", net_uuid)
return
lldpad_port = self.lldpad_info
if not lldpad_port:
LOG.error("There is no LLDPad port available.")
return
exist_vdp_vlan = lvm.late_binding_vlan
lvid = lvm.vlan
LOG.debug("lvid %(lvid)s exist %(vlan)s",
{'lvid': lvid, 'vlan': exist_vdp_vlan})
lvm.decr_reset_vlan(port_uuid, vdp_vlan)
lvm.set_fail_reason(port_uuid, fail_reason)
self.vdp_vlan_cb(port_uuid, lvid, vdp_vlan, fail_reason)
if vdp_vlan == exist_vdp_vlan:
LOG.debug("No change in provider VLAN %s", vdp_vlan)
return
# Logic is if the VLAN changed to 0, clear the flows only if none of
# the VM's in the network has a valid VLAN.
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
if ovs_lib.is_valid_vlan_tag(exist_vdp_vlan) and not (
lvm.any_valid_vlan()):
# Clear the old flows
LOG.debug("Clearing flows, no valid vlans")
self.program_vm_ovs_flows(lvid, exist_vdp_vlan, 0)
lvm.late_binding_vlan = 0
lvm.vdp_nego_req = False
else:
# If any VM gets a VLAN change, we immediately modify the flow.
# This is done to not wait for all VM's VLAN getting updated from
# switch. Logic is if any VM gts a new VLAN, the other VM's of the
# same network will be updated eventually.
if vdp_vlan != exist_vdp_vlan and (
ovs_lib.is_valid_vlan_tag(vdp_vlan)):
# Add the new flows and remove the old flows
LOG.warning("Non Zero VDP Vlan change %s %s" %
(vdp_vlan, exist_vdp_vlan))
self.program_vm_ovs_flows(lvid, exist_vdp_vlan, vdp_vlan)
lvm.late_binding_vlan = vdp_vlan
lvm.vdp_nego_req = False
else:
LOG.error("Invalid or same VLAN Exist %(exist)s "
"New %(new)s VLANs",
{'exist': exist_vdp_vlan, 'new': vdp_vlan}) | def vdp_vlan_change_internal(self, vsw_cb_data, vdp_vlan, fail_reason) | Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads. | 3.676983 | 3.64182 | 1.009655 |
LOG.debug("In VDP VLAN change VLAN %s" % vdp_vlan)
try:
with self.ovs_vdp_lock:
self.vdp_vlan_change_internal(vsw_cb_data, vdp_vlan,
fail_reason)
except Exception as e:
LOG.error("Exception in vdp_vlan_change %s" % str(e)) | def vdp_vlan_change(self, vsw_cb_data, vdp_vlan, fail_reason) | Callback Function from VDP when provider VLAN changes.
This will be called only during error cases when switch
reloads or when compute reloads. | 2.572798 | 2.71556 | 0.947428 |
lldpad_port = self.lldpad_info
if lldpad_port:
ovs_cb_data = {'obj': self, 'port_uuid': port_uuid, 'mac': mac,
'net_uuid': net_uuid}
vdp_vlan, fail_reason = lldpad_port.send_vdp_vnic_up(
port_uuid=port_uuid, vsiid=port_uuid, gid=segmentation_id,
mac=mac, new_network=True, oui=oui,
vsw_cb_fn=self.vdp_vlan_change, vsw_cb_data=ovs_cb_data)
else:
fail_reason = "There is no LLDPad port available."
LOG.error("%s", fail_reason)
return {'result': False, 'vdp_vlan': cconstants.INVALID_VLAN,
'fail_reason': fail_reason}
# check validity
if not ovs_lib.is_valid_vlan_tag(vdp_vlan):
LOG.error("Cannot provision VDP Overlay network for"
" net-id=%(net_uuid)s - Invalid ",
{'net_uuid': net_uuid})
return {'result': True, 'vdp_vlan': cconstants.INVALID_VLAN,
'fail_reason': fail_reason}
LOG.info('provision_vdp_overlay_networks: add_flow for '
'Local Vlan %(local_vlan)s VDP VLAN %(vdp_vlan)s',
{'local_vlan': lvid, 'vdp_vlan': vdp_vlan})
self.program_vm_ovs_flows(lvid, 0, vdp_vlan)
return {'result': True, 'vdp_vlan': vdp_vlan, 'fail_reason': None} | def provision_vdp_overlay_networks(self, port_uuid, mac, net_uuid,
segmentation_id, lvid, oui) | Provisions a overlay type network configured using VDP.
:param port_uuid: the uuid of the VM port.
:param mac: the MAC address of the VM.
:param net_uuid: the uuid of the network associated with this vlan.
:param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
:lvid: Local VLAN ID
:oui: OUI Parameters | 3.354029 | 3.462558 | 0.968657 |
LOG.info("Populating the OVS VDP cache with port %(port_uuid)s, "
"mac %(mac)s net %(net_uuid)s lvid %(lvid)s vdpvlan "
"%(vdp_vlan)s seg %(seg)s",
{'port_uuid': port_uuid, 'mac': mac, 'net_uuid': net_uuid,
'lvid': lvid, 'vdp_vlan': vdp_vlan, 'seg': segmentation_id})
lvm = self.local_vlan_map.get(net_uuid)
if not lvm:
lvm = LocalVlan(lvid, segmentation_id)
self.local_vlan_map[net_uuid] = lvm
lvm.lvid = lvid
lvm.set_port_uuid(port_uuid, vdp_vlan, None)
if vdp_vlan != cconstants.INVALID_VLAN:
lvm.late_binding_vlan = vdp_vlan
lvm.vdp_nego_req = False | def pop_local_cache(self, port_uuid, mac, net_uuid, lvid, vdp_vlan,
segmentation_id) | Populate the local cache after restart. | 2.476991 | 2.427003 | 1.020596 |
self.dummy_net_id = net_id
self.dummy_subnet_id = subnet_id
self.dummy_router_id = rtr_id | def store_dummy_router_net(self, net_id, subnet_id, rtr_id) | Storing the router attributes. | 1.933325 | 1.744038 | 1.108533 |
if direc == 'in':
self.in_dcnm_net_dict = net_dict
else:
self.out_dcnm_net_dict = net_dict | def store_dcnm_net_dict(self, net_dict, direc) | Storing the DCNM net dict. | 2.001949 | 1.954862 | 1.024087 |
if not subnet_dict:
return
alloc_pool = subnet_dict.get('allocation_pools')
cidr = subnet_dict.get('cidr')
subnet = cidr.split('/')[0]
start = alloc_pool[0].get('start')
end = alloc_pool[0].get('end')
gateway = subnet_dict.get('gateway_ip')
sec_gateway = subnet_dict.get('secondary_gw')
return {'subnet': subnet, 'start': start, 'end': end,
'gateway': gateway, 'sec_gateway': sec_gateway} | def _parse_subnet(self, subnet_dict) | Return the subnet, start, end, gateway of a subnet. | 2.119968 | 1.947698 | 1.088448 |
if direc == 'in':
self.in_dcnm_subnet_dict = subnet_dict
self.in_subnet_dict = self._parse_subnet(subnet_dict)
else:
self.out_dcnm_subnet_dict = subnet_dict
self.out_subnet_dict = self._parse_subnet(subnet_dict) | def store_dcnm_subnet_dict(self, subnet_dict, direc) | Store the subnet attributes and dict. | 1.812296 | 1.775862 | 1.020516 |
fw_dict = {'fw_id': fw_id, 'name': fw_name, 'tenant_id': tenant_id}
# FW DB is already created by FW Mgr
# self.add_fw_db(fw_id, fw_dict)
self.update_fw_dict(fw_dict) | def create_fw_db(self, fw_id, fw_name, tenant_id) | Create FW dict. | 3.269009 | 3.152374 | 1.036999 |
del self.fw_dict
del self.in_dcnm_net_dict
del self.in_dcnm_subnet_dict
del self.out_dcnm_net_dict
del self.out_dcnm_subnet_dict | def destroy_local_fw_db(self) | Delete the FW dict and its attributes. | 3.612578 | 3.045282 | 1.186287 |
fw_dict = self.get_fw_dict()
if direc == 'in':
fw_dict.update({'in_network_id': net, 'in_service_ip': start})
else:
fw_dict.update({'out_network_id': net, 'out_service_ip': start})
self.update_fw_dict(fw_dict) | def update_fw_local_cache(self, net, direc, start) | Update the fw dict with Net ID and service IP. | 2.492311 | 1.924795 | 1.294844 |
fw_dict = self.get_fw_dict()
if os_result is not None:
fw_dict['os_status'] = os_result
if dcnm_result is not None:
fw_dict['dcnm_status'] = dcnm_result
if dev_result is not None:
fw_dict['dev_status'] = dev_result
self.update_fw_dict(fw_dict) | def update_fw_local_result_str(self, os_result=None, dcnm_result=None,
dev_result=None) | Update the FW result in the dict. | 1.603324 | 1.523283 | 1.052545 |
self.update_fw_local_result_str(os_result=os_result,
dcnm_result=dcnm_result,
dev_result=dev_result) | def update_fw_local_result(self, os_result=None, dcnm_result=None,
dev_result=None) | Retrieve and update the FW result in the dict. | 2.276167 | 2.158749 | 1.054392 |
fw_dict = self.get_fw_dict()
fw_dict.update({'router_id': router_id, 'router_net_id': net_id,
'router_subnet_id': subnet_id})
self.store_dummy_router_net(net_id, subnet_id, router_id)
self.update_fw_local_result(os_result=os_result) | def update_fw_local_router(self, net_id, subnet_id, router_id, os_result) | Update the FW with router attributes. | 2.969695 | 2.959782 | 1.003349 |
fw_dict = self.get_fw_dict()
self.update_fw_db(fw_dict.get('fw_id'), fw_dict) | def commit_fw_db(self) | Calls routine to update the FW DB. | 3.912987 | 3.351754 | 1.167445 |
fw_dict = self.get_fw_dict()
self.update_fw_db_result(fw_dict.get('fw_id'), fw_dict) | def commit_fw_db_result(self) | Calls routine to update the FW create/delete result in DB. | 3.992172 | 3.292087 | 1.212657 |
fw_dict = self.get_fw_dict()
fw_data, fw_data_dict = self.get_fw(fw_dict.get('fw_id'))
res = fw_data.result
self.store_local_final_result(res) | def get_store_local_final_result(self) | Store/Retrieve the final result.
Retrieve the final result for FW create/delete from DB and store it
locally. | 4.809396 | 4.226293 | 1.13797 |
self.state = state
if popl_db:
fw_dict = self.get_fw_dict()
self.append_state_final_result(fw_dict.get('fw_id'),
self.get_local_final_result(),
state) | def store_state(self, state, popl_db=True) | Store the state of FW create/del operation. | 6.302794 | 5.27702 | 1.194385 |
def fixup_state(self, from_str, state):
    """Fixup state after restart.

    Fixup the state, if Delete is called when create SM is half-way
    through.
    """
    result = self.get_local_final_result()
    # A delete arriving mid-create (or vice versa) shifts the state by
    # one so the opposite state machine picks up from the right spot.
    if from_str == fw_const.FW_CR_OP and (
            result == fw_const.RESULT_FW_DELETE_INIT):
        return state + 1
    if from_str == fw_const.FW_DEL_OP and (
            result == fw_const.RESULT_FW_CREATE_INIT):
        return state - 1
    return state
def store_db_obj(cls, in_obj, out_obj):
    """Store the IP DB object."""
    cls.ip_db_obj.update({'in': in_obj, 'out': out_obj})
def get_in_ip_addr(cls, tenant_id):
    """Retrieves the 'in' service subnet attributes."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    return cls.serv_obj_dict[tenant_id].get_in_ip_addr()
def get_out_ip_addr(cls, tenant_id):
    """Retrieves the 'out' service subnet attributes."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    return cls.serv_obj_dict[tenant_id].get_out_ip_addr()
def get_in_srvc_node_ip_addr(cls, tenant_id):
    """Retrieves the IN service node IP address."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    subnet_info = cls.serv_obj_dict[tenant_id].get_in_ip_addr()
    # Service node sits at the third address (base + 2) of the subnet.
    return str(netaddr.IPAddress(subnet_info.get('subnet')) + 2)
def get_out_srvc_node_ip_addr(cls, tenant_id):
    """Retrieves the OUT service node IP address."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    subnet_info = cls.serv_obj_dict[tenant_id].get_out_ip_addr()
    # Service node sits at the third address (base + 2) of the subnet.
    return str(netaddr.IPAddress(subnet_info.get('subnet')) + 2)
def get_dummy_router_net(cls, tenant_id):
    """Retrieves the dummy router network info."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    return cls.serv_obj_dict[tenant_id].get_dummy_router_net()
def get_in_seg_vlan(cls, tenant_id):
    """Retrieves the IN Seg, VLAN, mob domain."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None, None
    return cls.serv_obj_dict[tenant_id].get_in_seg_vlan()
def get_out_seg_vlan(cls, tenant_id):
    """Retrieves the OUT Seg, VLAN, mob domain."""
    if tenant_id not in cls.serv_obj_dict:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None, None
    return cls.serv_obj_dict[tenant_id].get_out_seg_vlan()
def get_in_subnet_id(cls, tenant_id):
    """Retrieve the subnet ID of IN network."""
    if 'in' not in cls.ip_db_obj:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None
    db_obj = cls.ip_db_obj['in']
    subnet_info = cls.get_in_ip_addr(tenant_id)
    return db_obj.get_subnet(subnet_info.get('subnet')).subnet_id
def get_out_subnet_id(cls, tenant_id):
    """Retrieve the subnet ID of OUT network."""
    if 'out' not in cls.ip_db_obj:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None
    db_obj = cls.ip_db_obj['out']
    subnet_info = cls.get_out_ip_addr(tenant_id)
    return db_obj.get_subnet(subnet_info.get('subnet')).subnet_id
def get_in_net_id(cls, tenant_id):
    """Retrieve the network ID of IN network."""
    if 'in' not in cls.ip_db_obj:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None
    db_obj = cls.ip_db_obj['in']
    subnet_info = cls.get_in_ip_addr(tenant_id)
    return db_obj.get_subnet(subnet_info.get('subnet')).network_id
def get_out_net_id(cls, tenant_id):
    """Retrieve the network ID of OUT network."""
    if 'out' not in cls.ip_db_obj:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return None
    db_obj = cls.ip_db_obj['out']
    subnet_info = cls.get_out_ip_addr(tenant_id)
    return db_obj.get_subnet(subnet_info.get('subnet')).network_id
def is_network_source_fw(cls, nwk, nwk_name):
    """Check if SOURCE is FIREWALL, if yes return TRUE.

    If source is None or entry not in NWK DB, check from Name.
    Name should have constant AND length should match.
    """
    if nwk is not None:
        return nwk.source == fw_const.FW_CONST
    # FW-created names are built as fw_id[0:4] + CONST + fw_id[-4:]
    # (see fill_dcnm_net_info), i.e. the constant is embedded INSIDE a
    # longer name. The original test 'nwk_name in CONST' could never be
    # True when the length check demands nwk_name be longer than CONST,
    # so the containment direction is fixed here.
    for marker in (fw_const.DUMMY_SERVICE_NWK, fw_const.IN_SERVICE_NWK,
                   fw_const.OUT_SERVICE_NWK):
        if marker in nwk_name and (
                len(nwk_name) == len(marker) +
                fw_const.SERVICE_NAME_EXTRA_LEN):
            return True
    return False
def is_subnet_source_fw(cls, tenant_id, subnet):
    """Check if the subnet is created as a result of any FW operation."""
    cfg = config.CiscoDFAConfig().cfg
    base = subnet.split('/')[0]
    # Check the tenant's IN then OUT service subnets, in that order.
    for sub_dict in (cls.get_in_ip_addr(tenant_id),
                     cls.get_out_ip_addr(tenant_id)):
        if not sub_dict:
            return False
        if sub_dict.get('subnet') == base:
            return True
    # Finally compare against the configured dummy service subnet.
    dummy_base = cfg.firewall.fw_service_dummy_ip_subnet.split('/')[0]
    return base == dummy_base
def initialize_create_state_map(self):
    """This is a mapping of create result message string to state."""
    self.fabric_state_map = {
        fw_const.INIT_STATE_STR: fw_const.OS_IN_NETWORK_STATE,
        # Openstack network / router creation phases: a failure retries
        # the same state, a success advances to the next one.
        fw_const.OS_IN_NETWORK_CREATE_FAIL:
            fw_const.OS_IN_NETWORK_STATE,
        fw_const.OS_IN_NETWORK_CREATE_SUCCESS:
            fw_const.OS_OUT_NETWORK_STATE,
        fw_const.OS_OUT_NETWORK_CREATE_FAIL:
            fw_const.OS_OUT_NETWORK_STATE,
        fw_const.OS_OUT_NETWORK_CREATE_SUCCESS:
            fw_const.OS_DUMMY_RTR_STATE,
        fw_const.OS_DUMMY_RTR_CREATE_FAIL:
            fw_const.OS_DUMMY_RTR_STATE,
        fw_const.OS_DUMMY_RTR_CREATE_SUCCESS:
            fw_const.DCNM_IN_NETWORK_STATE,
        # DCNM network / partition creation phases.
        fw_const.DCNM_IN_NETWORK_CREATE_FAIL:
            fw_const.DCNM_IN_NETWORK_STATE,
        fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS:
            fw_const.DCNM_IN_PART_UPDATE_STATE,
        fw_const.DCNM_IN_PART_UPDATE_FAIL:
            fw_const.DCNM_IN_PART_UPDATE_STATE,
        fw_const.DCNM_IN_PART_UPDATE_SUCCESS:
            fw_const.DCNM_OUT_PART_STATE,
        fw_const.DCNM_OUT_PART_CREATE_FAIL:
            fw_const.DCNM_OUT_PART_STATE,
        fw_const.DCNM_OUT_PART_CREATE_SUCCESS:
            fw_const.DCNM_OUT_NETWORK_STATE,
        fw_const.DCNM_OUT_NETWORK_CREATE_FAIL:
            fw_const.DCNM_OUT_NETWORK_STATE,
        fw_const.DCNM_OUT_NETWORK_CREATE_SUCCESS:
            fw_const.DCNM_OUT_PART_UPDATE_STATE,
        fw_const.DCNM_OUT_PART_UPDATE_FAIL:
            fw_const.DCNM_OUT_PART_UPDATE_STATE,
        fw_const.DCNM_OUT_PART_UPDATE_SUCCESS:
            fw_const.FABRIC_PREPARE_DONE_STATE,
    }
def initialize_delete_state_map(self):
    """This is a mapping of delete result message string to state."""
    self.fabric_state_del_map = {
        fw_const.INIT_STATE_STR: fw_const.OS_IN_NETWORK_STATE,
        # Delete walks the create sequence in reverse: a failure retries
        # the same state, a success moves back one phase.
        fw_const.OS_IN_NETWORK_DEL_FAIL:
            fw_const.OS_IN_NETWORK_STATE,
        fw_const.OS_IN_NETWORK_DEL_SUCCESS:
            fw_const.INIT_STATE,
        fw_const.OS_OUT_NETWORK_DEL_FAIL:
            fw_const.OS_OUT_NETWORK_STATE,
        fw_const.OS_OUT_NETWORK_DEL_SUCCESS:
            fw_const.OS_IN_NETWORK_STATE,
        fw_const.OS_DUMMY_RTR_DEL_FAIL:
            fw_const.OS_DUMMY_RTR_STATE,
        fw_const.OS_DUMMY_RTR_DEL_SUCCESS:
            fw_const.OS_OUT_NETWORK_STATE,
        fw_const.DCNM_IN_NETWORK_DEL_FAIL:
            fw_const.DCNM_IN_NETWORK_STATE,
        fw_const.DCNM_IN_NETWORK_DEL_SUCCESS:
            fw_const.OS_DUMMY_RTR_STATE,
        fw_const.DCNM_IN_PART_UPDDEL_FAIL:
            fw_const.DCNM_IN_PART_UPDATE_STATE,
        fw_const.DCNM_IN_PART_UPDDEL_SUCCESS:
            fw_const.DCNM_IN_NETWORK_STATE,
        fw_const.DCNM_OUT_PART_DEL_FAIL:
            fw_const.DCNM_OUT_PART_STATE,
        fw_const.DCNM_OUT_PART_DEL_SUCCESS:
            fw_const.DCNM_IN_PART_UPDATE_STATE,
        fw_const.DCNM_OUT_NETWORK_DEL_FAIL:
            fw_const.DCNM_OUT_NETWORK_STATE,
        fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS:
            fw_const.DCNM_OUT_PART_STATE,
        fw_const.DCNM_OUT_PART_UPDDEL_FAIL:
            fw_const.DCNM_OUT_PART_UPDATE_STATE,
        fw_const.DCNM_OUT_PART_UPDDEL_SUCCESS:
            fw_const.DCNM_OUT_NETWORK_STATE,
    }
def initialize_fsm(self):
    """Initializing the Finite State Machine.

    This is a mapping of state to a dict of appropriate create and delete
    functions.
    """
    # Each state maps to a [create_handler, delete_handler] pair.
    self.fabric_fsm = {
        fw_const.INIT_STATE:
            [self.init_state, self.init_state],
        fw_const.OS_IN_NETWORK_STATE:
            [self.create_os_in_nwk, self.delete_os_in_nwk],
        fw_const.OS_OUT_NETWORK_STATE:
            [self.create_os_out_nwk, self.delete_os_out_nwk],
        fw_const.OS_DUMMY_RTR_STATE:
            [self.create_os_dummy_rtr, self.delete_os_dummy_rtr],
        fw_const.DCNM_IN_NETWORK_STATE:
            [self.create_dcnm_in_nwk, self.delete_dcnm_in_nwk],
        fw_const.DCNM_IN_PART_UPDATE_STATE:
            [self.update_dcnm_in_part, self.clear_dcnm_in_part],
        fw_const.DCNM_OUT_PART_STATE:
            [self.create_dcnm_out_part, self.delete_dcnm_out_part],
        fw_const.DCNM_OUT_NETWORK_STATE:
            [self.create_dcnm_out_nwk, self.delete_dcnm_out_nwk],
        fw_const.DCNM_OUT_PART_UPDATE_STATE:
            [self.update_dcnm_out_part, self.clear_dcnm_out_part],
        fw_const.FABRIC_PREPARE_DONE_STATE:
            [self.prepare_fabric_done, self.prepare_fabric_done],
    }
def create_serv_obj(self, tenant_id):
    """Creates and stores the service object associated with a tenant."""
    serv_obj = ServiceIpSegTenantMap()
    self.service_attr[tenant_id] = serv_obj
    self.store_tenant_obj(tenant_id, serv_obj)
def delete_serv_obj(self, tenant_id):
    """Deletes the stored service object associated with a tenant."""
    self.del_obj(tenant_id, self.service_attr[tenant_id])
    self.service_attr.pop(tenant_id)
def store_net_db(self, tenant_id, net, net_dict, result):
    """Store service network in DB."""
    db_entry = {
        'name': net_dict.get('name'),
        'config_profile': net_dict.get('config_profile'),
        'segmentation_id': net_dict.get('segmentation_id'),
        'tenant_id': tenant_id,
        'fwd_mode': net_dict.get('fwd_mode'),
        'vlan': net_dict.get('vlan_id'),
    }
    self.add_network_db(net, db_entry, fw_const.FW_CONST, result)
def store_fw_db(self, tenant_id, net, subnet_dict, direc):
    """Calls the service object routine to commit the FW entry to DB."""
    tenant_svc = self.get_service_obj(tenant_id)
    start_ip = subnet_dict.get('allocation_pools')[0].get('start')
    tenant_svc.update_fw_local_cache(net, direc, start_ip)
    tenant_svc.commit_fw_db()
def update_fw_db_result(self, tenant_id, os_status=None, dcnm_status=None,
                        dev_status=None):
    """Update the FW DB Result and commit it in DB.

    Calls the service object routine to commit the result of a FW
    operation in to DB
    """
    tenant_svc = self.get_service_obj(tenant_id)
    tenant_svc.update_fw_local_result(os_status, dcnm_status, dev_status)
    tenant_svc.commit_fw_db_result()
def store_fw_db_router(self, tenant_id, net_id, subnet_id, router_id,
                       os_status):
    """Store the result of FW router operation in DB.

    Calls the service object routine to commit the result of router
    operation in to DB, after updating the local cache.
    """
    tenant_svc = self.get_service_obj(tenant_id)
    tenant_svc.update_fw_local_router(net_id, subnet_id, router_id,
                                      os_status)
    tenant_svc.commit_fw_db()
    tenant_svc.commit_fw_db_result()
def store_net_fw_db(self, tenant_id, net, net_dict, subnet_dict,
                    direc, result, os_status=None, dcnm_status=None,
                    dev_status=None):
    """Save the entries in Network and Firewall DB.

    Stores the entries into Network DB and Firewall DB as well as update
    the result of operation into FWDB. Generally called by OS operations
    that wants to modify both the Net DB and FW DB.
    """
    self.store_net_db(tenant_id, net, net_dict, result)
    self.store_fw_db(tenant_id, net, subnet_dict, direc)
    self.update_fw_db_result(tenant_id, os_status=os_status,
                             dcnm_status=dcnm_status,
                             dev_status=dev_status)
def check_allocate_ip(self, obj, direc):
    """This function allocates a subnet from the pool.

    It first checks to see if Openstack is already using the subnet.
    If yes, it retries until it finds a free subnet not used by
    Openstack.
    """
    # Exclude every CIDR Openstack already has in use.
    in_use = self.os_helper.get_all_subnets_cidr(no_mask=True)
    next_sub = obj.allocate_subnet(in_use)
    if next_sub is None:
        LOG.error("Unable to allocate a subnet for direction %s", direc)
    return next_sub
def get_next_ip(self, tenant_id, direc):
    """Retrieve the next available subnet.

    Given a tenant, it returns the service subnet values assigned
    to it based on direction.
    """
    # TODO(padkrish) Put in a common functionality for services.
    if direc == 'in':
        subnet_dict = self.get_in_ip_addr(tenant_id)
        pool = self.service_in_ip
    else:
        subnet_dict = self.get_out_ip_addr(tenant_id)
        pool = self.service_out_ip
    if subnet_dict:
        # Already assigned; reuse the stored attributes.
        return subnet_dict
    ip_next = self.check_allocate_ip(pool, direc)
    return {'subnet': ip_next,
            'start': self.get_start_ip(ip_next),
            'end': self.get_end_ip(ip_next),
            'gateway': self.get_gateway(ip_next),
            'sec_gateway': self.get_secondary_gateway(ip_next)}
def release_subnet(self, cidr, direc):
    """Routine to release a subnet from the DB."""
    pool = self.service_in_ip if direc == 'in' else self.service_out_ip
    pool.release_subnet(cidr)
def fill_dcnm_subnet_info(self, tenant_id, subnet, start, end, gateway,
                          sec_gateway, direc):
    """Fills the DCNM subnet parameters.

    Function that fills the subnet parameters for a tenant required by
    DCNM.
    """
    fw_id = self.get_service_obj(tenant_id).get_fw_dict().get('fw_id')
    marker = (fw_const.IN_SERVICE_SUBNET if direc == 'in'
              else fw_const.OUT_SERVICE_SUBNET)
    # Name embeds the first and last 4 chars of the FW ID around the tag.
    name = fw_id[0:4] + marker + fw_id[len(fw_id) - 4:]
    # TODO(padkrish) Network ID and subnet ID are not filled.
    return {'enable_dhcp': False,
            'tenant_id': tenant_id,
            'name': name,
            'cidr': subnet + '/24',
            'gateway_ip': gateway,
            'secondary_gw': sec_gateway,
            'ip_version': 4,
            'allocation_pools': [{'start': start, 'end': end}]}
def retrieve_dcnm_subnet_info(self, tenant_id, direc):
    """Retrieves the DCNM subnet info for a tenant."""
    return self.get_service_obj(tenant_id).get_dcnm_subnet_dict(direc)
def alloc_retrieve_subnet_info(self, tenant_id, direc):
    """Allocate and store Subnet.

    This function initially checks if subnet is allocated for a tenant
    for the in/out direction. If not, it calls routine to allocate a subnet
    and stores it on tenant object.
    """
    tenant_svc = self.get_service_obj(tenant_id)
    cached = self.retrieve_dcnm_subnet_info(tenant_id, direc)
    if cached:
        return cached
    ip_info = self.get_next_ip(tenant_id, direc)
    subnet_dict = self.fill_dcnm_subnet_info(
        tenant_id, ip_info.get('subnet'), ip_info.get('start'),
        ip_info.get('end'), ip_info.get('gateway'),
        ip_info.get('sec_gateway'), direc)
    tenant_svc.store_dcnm_subnet_dict(subnet_dict, direc)
    return subnet_dict
def retrieve_dcnm_net_info(self, tenant_id, direc):
    """Retrieves the DCNM network info for a tenant."""
    return self.get_service_obj(tenant_id).get_dcnm_net_dict(direc)
def update_dcnm_net_info(self, tenant_id, direc, vlan_id,
                         segmentation_id):
    """Update the DCNM net info with allocated values of seg/vlan."""
    info = self.retrieve_dcnm_net_info(tenant_id, direc)
    if not info:
        return None
    info['vlan_id'] = vlan_id
    # A non-zero VLAN implies membership of a mobility domain.
    if vlan_id != 0:
        info['mob_domain'] = True
    info['segmentation_id'] = segmentation_id
    return info
def fill_dcnm_net_info(self, tenant_id, direc, vlan_id=0,
                       segmentation_id=0):
    """Fill DCNM network parameters.

    Function that fills the network parameters for a tenant required by
    DCNM.
    """
    fw_id = self.get_service_obj(tenant_id).get_fw_dict().get('fw_id')
    net_dict = {'status': 'ACTIVE', 'admin_state_up': True,
                'tenant_id': tenant_id, 'provider:network_type': 'local',
                'vlan_id': vlan_id, 'segmentation_id': segmentation_id}
    if vlan_id == 0:
        net_dict.update({'mob_domain': False, 'mob_domain_name': None})
    else:
        net_dict['mob_domain'] = True
    # TODO(padkrish) NWK ID are not filled.
    if direc == 'in':
        net_dict.update(
            {'name': fw_id[0:4] + fw_const.IN_SERVICE_NWK + (
                fw_id[len(fw_id) - 4:]),
             'part_name': None,
             'config_profile': self.serv_host_prof,
             'fwd_mode': self.serv_host_mode})
    else:
        net_dict.update(
            {'name': fw_id[0:4] + fw_const.OUT_SERVICE_NWK + (
                fw_id[len(fw_id) - 4:]),
             'part_name': fw_const.SERV_PART_NAME,
             'config_profile': self.serv_ext_prof,
             'fwd_mode': self.serv_ext_mode})
    return net_dict
def retrieve_network_info(self, tenant_id, direc):
    """Retrieve the DCNM Network information.

    Retrieves DCNM net dict if already filled, else, it calls
    routines to fill the net info and store it in tenant obj.
    """
    net_dict = self.retrieve_dcnm_net_info(tenant_id, direc)
    if not net_dict:
        net_dict = self.fill_dcnm_net_info(tenant_id, direc)
        self.get_service_obj(tenant_id).store_dcnm_net_dict(net_dict,
                                                            direc)
    return net_dict
def alloc_seg(self, net_id):
    """Allocates the segmentation ID."""
    return self.service_segs.allocate_segmentation_id(
        net_id, source=fw_const.FW_CONST)
def alloc_vlan(self, net_id):
    """Allocates the vlan ID."""
    return self.service_vlans.allocate_segmentation_id(
        net_id, source=fw_const.FW_CONST)
def update_subnet_db_info(self, tenant_id, direc, net_id, subnet_id):
    """Update the subnet DB with Net and Subnet ID, given the subnet."""
    subnet_dict = self.retrieve_dcnm_subnet_info(tenant_id, direc)
    if not subnet_dict:
        LOG.error("Subnet dict not found for tenant %s", tenant_id)
        return
    base = subnet_dict['cidr'].split('/')[0]
    pool = self.service_in_ip if direc == 'in' else self.service_out_ip
    pool.update_subnet(base, net_id, subnet_id)
def update_net_info(self, tenant_id, direc, vlan_id, segmentation_id):
    """Update the DCNM netinfo with vlan and segmentation ID."""
    updated = self.update_dcnm_net_info(tenant_id, direc, vlan_id,
                                        segmentation_id)
    self.get_service_obj(tenant_id).store_dcnm_net_dict(updated, direc)
    return updated
def _create_service_nwk(self, tenant_id, tenant_name, direc):
    """Function to create the service in network in DCNM."""
    net = utils.Dict2Obj(self.retrieve_dcnm_net_info(tenant_id, direc))
    subnet = utils.Dict2Obj(
        self.retrieve_dcnm_subnet_info(tenant_id, direc))
    try:
        self.dcnm_obj.create_service_network(tenant_name, net, subnet)
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to create network in DCNM %s", direc)
        return False
    return True
def _delete_service_nwk(self, tenant_id, tenant_name, direc):
    """Function to delete the service in network in DCNM."""
    if direc == 'in':
        seg, vlan = self.get_in_seg_vlan(tenant_id)
        part_name = None
    else:
        seg, vlan = self.get_out_seg_vlan(tenant_id)
        part_name = fw_const.SERV_PART_NAME
    net = utils.Dict2Obj({'part_name': part_name,
                          'segmentation_id': seg,
                          'vlan': vlan})
    try:
        self.dcnm_obj.delete_service_network(tenant_name, net)
    except dexc.DfaClientRequestFailed:
        LOG.error("Failed to delete network in DCNM %s", direc)
        return False
    return True
def get_dummy_router_net(self, tenant_id):
    """Retrieves the dummy router information from service object."""
    if tenant_id not in self.service_attr:
        LOG.error("Fabric not prepared for tenant %s", tenant_id)
        return
    return self.get_service_obj(tenant_id).get_dummy_router_net()
def _create_out_partition(self, tenant_id, tenant_name):
    """Function to create a service partition."""
    self.dcnm_obj.create_partition(
        tenant_name, fw_const.SERV_PART_NAME, None,
        self.serv_part_vrf_prof, desc="Service Partition")
def _update_partition_srvc_node_ip(self, tenant_name, srvc_ip,
                                   vrf_prof=None, part_name=None):
    """Function to update srvc_node address of partition."""
    # Push the new service-node IP to DCNM for this project/partition.
    self.dcnm_obj.update_project(tenant_name, part_name,
                                 service_node_ip=srvc_ip,
                                 vrf_prof=vrf_prof,
                                 desc="Service Partition")
def _update_partition_dci_id(self, tenant_name, dci_id,
                             vrf_prof=None, part_name=None):
    """Function to update DCI ID of partition."""
    self.dcnm_obj.update_project(tenant_name, part_name, dci_id=dci_id,
                                 vrf_prof=vrf_prof)
def _update_partition_in_create(self, tenant_id, tenant_name):
    """Function to update a partition."""
    # Need more generic thinking on this one TODO(padkrish)
    in_subnet_dict = self.get_in_ip_addr(tenant_id)
    # Service node is the third address (base + 2) of the IN subnet.
    next_hop = str(netaddr.IPAddress(in_subnet_dict.get('subnet')) + 2)
    self._update_partition_srvc_node_ip(tenant_name, next_hop)
def _update_partition_out_create(self, tenant_id, tenant_name):
    """Function to update a partition."""
    seg = self.dcnm_obj.get_partition_segmentId(tenant_name,
                                                fw_const.SERV_PART_NAME)
    if seg is None:
        return False
    # Use the partition's segment ID as its DCI ID.
    self._update_partition_dci_id(tenant_name, seg,
                                  vrf_prof=self.serv_part_vrf_prof,
                                  part_name=fw_const.SERV_PART_NAME)
    return True
self.dcnm_obj.delete_partition(tenant_name, fw_const.SERV_PART_NAME) | def _delete_partition(self, tenant_id, tenant_name) | Function to delete a service partition. | 11.44726 | 8.168919 | 1.401319 |
def allocate_seg_vlan(self, net_id, is_fw_virt, direc, tenant_id):
    """allocate segmentation ID and VLAN ID.

    Allocate vlan, seg thereby storing NetID atomically.
    This saves an extra step to update DB with NetID after allocation.
    Also may save an extra step after restart, if process crashed
    after allocation but before updating DB with NetID. Now, since
    both steps are combined, Vlan/Seg won't be allocated w/o NetID.
    """
    seg = self.alloc_seg(net_id)
    # VLAN allocation is only needed for physical firewall case
    vlan = 0 if is_fw_virt else self.alloc_vlan(net_id)
    # Updating the local cache
    self.update_net_info(tenant_id, direc, vlan, seg)
def create_openstack_network(self, subnet, network, tenant_id,
                             tenant_name, direction):
    """Helper function to create openstack network.

    The net_id and subnet_id is returned. Upon failure, the subnet is
    deallocated.
    """
    try:
        # Gateway is the third address (base + 2) of the subnet.
        gw = str(netaddr.IPAddress(subnet['cidr'].split('/')[0]) + 2)
        net_id, subnet_id = self.os_helper.create_network(
            network['name'], tenant_id, subnet['cidr'], gw=gw)
        if net_id is None or subnet_id is None:
            # Partial failure: give the subnet back to the pool.
            self.release_subnet(subnet['cidr'], direction)
        return net_id, subnet_id
    except Exception as exc:
        self.release_subnet(subnet['cidr'], direction)
        LOG.error("Create network for tenant %(tenant)s "
                  "network %(name)s direction %(dir)s failed "
                  "exc %(exc)s ",
                  {'tenant': tenant_name, 'name': network['name'],
                   'dir': direction, 'exc': str(exc)})
        return None, None
def _create_os_nwk(self, tenant_id, tenant_name, direc, is_fw_virt=False):
    """Function to create Openstack network.

    This function does the following:
    1. Allocate an IP address with the net_id/subnet_id not filled in
       the DB.
    2. Fill network parameters w/o vlan, segmentation_id, because we
       don't have net_id to store in DB.
    3. Create a Openstack network, using the network parameters created
       in the previous step. At this point we will have a net_id.
    4. Allocate segmentation_id, vlan and along with net_id store it in
       the DB.
    5. Update IP DB with net_id created in step 3. So, after restart
       deallocate any IP DB entries that does not have a
       net_id/subnet_id.
    """
    subnet = self.alloc_retrieve_subnet_info(tenant_id, direc)
    network = self.retrieve_network_info(tenant_id, direc)
    net_id, subnet_id = self.create_openstack_network(
        subnet, network, tenant_id, tenant_name, direc)
    if net_id and subnet_id:
        self.allocate_seg_vlan(net_id, is_fw_virt, direc, tenant_id)
        self.update_subnet_db_info(tenant_id, direc, net_id, subnet_id)
    return net_id, subnet_id
def _attach_dummy_intf_rtr(self, tenant_id, tenant_name, rtr_id):
    """Function to create a dummy router and interface."""
    fw_id = self.get_service_obj(tenant_id).get_fw_dict().get('fw_id')
    rtr_nwk = fw_id[0:4] + fw_const.DUMMY_SERVICE_NWK + (
        fw_id[len(fw_id) - 4:])
    net_id, subnet_id = self.os_helper.create_network(
        rtr_nwk, tenant_id, self.servicedummy_ip_subnet)
    if net_id is None or subnet_id is None:
        return None, None
    self.store_net_db(tenant_id, net_id, {'name': rtr_nwk}, 'SUCCESS')
    if rtr_id is None:
        # No router to attach to; undo the network creation.
        self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id,
                                      net_id)
        return None, None
    if not self.os_helper.add_intf_router(rtr_id, tenant_id,
                                          {subnet_id}):
        # Attach failed; undo the network creation.
        self.os_helper.delete_network(rtr_nwk, tenant_id, subnet_id,
                                      net_id)
        return None, None
    return net_id, subnet_id
def _delete_dummy_intf_rtr(self, tenant_id, tenant_name, rtr_id):
    """Function to delete a dummy interface of a router."""
    rtr_info = self.get_dummy_router_net(tenant_id)
    ret = self.delete_os_dummy_rtr_nwk(rtr_info.get('router_id'),
                                       rtr_info.get('net_id'),
                                       rtr_info.get('subnet_id'))
    # Release the network DB entry
    self.delete_network_db(rtr_info.get('net_id'))
    return ret
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.