code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
endpoints = RpcCallBacks(self) self.server = rpc.DfaRpcServer(self.ser_q, self._host, self.cfg.dfa_rpc.transport_url, endpoints, exchange=constants.DFA_EXCHANGE)
def _setup_rpc(self)
Setup RPC server for dfa server.
17.293901
13.118782
1.318255
orch_id = cfg.dcnm.orchestrator_id try: segid_range = self.dcnm_client.get_segmentid_range(orch_id) if segid_range is None: self.dcnm_client.set_segmentid_range(orch_id, seg_id_min, seg_id_max) else: conf_min, _, conf_max = segid_range[ "segmentIdRanges"].partition("-") if int(conf_min) != seg_id_min or int(conf_max) != seg_id_max: self.dcnm_client.update_segmentid_range(orch_id, seg_id_min, seg_id_max) except dexc.DfaClientRequestFailed as exc: LOG.error("Segment ID range could not be created/updated" " on DCNM: %s", exc) raise SystemExit(exc)
def register_segment_dcnm(self, cfg, seg_id_min, seg_id_max)
Register segmentation id pool with DCNM.
2.723522
2.691576
1.011869
if self.get_project_name(proj_id): LOG.info("project %s exists, returning", proj_id) return if not proj: try: proj = self.keystone_event._service.projects.get(proj_id) except Exception: LOG.error("Failed to find project %s.", proj_id) return # In the project name, dci_id may be included. Check if this is the # case and extact the dci_id from the name, and provide dci_id when # creating the project. proj_name, dci_id = self._get_dci_id_and_proj_name(proj.name) if proj_name in reserved_project_name: proj_name = "_".join((proj_name, self.cfg.dcnm.orchestrator_id)) # The default partition name is 'os' (i.e. openstack) which reflects # it is created by openstack. part_name = self.cfg.dcnm.default_partition_name if len(':'.join((proj_name, part_name))) > 32: LOG.error('Invalid project name length: %s. The length of ' 'org:part name is greater than 32', len(':'.join((proj_name, part_name)))) return try: self.dcnm_client.create_project(self.cfg.dcnm.orchestrator_id, proj_name, part_name, dci_id, proj.description) except dexc.DfaClientRequestFailed: # Failed to send create project in DCNM. # Save the info and mark it as failure and retry it later. self.update_project_info_cache(proj_id, name=proj_name, dci_id=dci_id, result=constants.CREATE_FAIL) LOG.error("Failed to create project %s on DCNM.", proj_name) else: self.update_project_info_cache(proj_id, name=proj_name, dci_id=dci_id) LOG.debug('project %(name)s %(dci)s %(desc)s', ( {'name': proj_name, 'dci': dci_id, 'desc': proj.description})) self.project_create_notif(proj_id, proj_name)
def project_create_func(self, proj_id, proj=None)
Create project given project uuid
3.601383
3.590003
1.00317
LOG.debug("Processing create %(proj)s event.", {'proj': proj_info}) proj_id = proj_info.get('resource_info') self.project_create_func(proj_id)
def project_create_event(self, proj_info)
Create project.
5.787791
5.598679
1.033778
LOG.debug("Processing project_update_event %(proj)s.", {'proj': proj_info}) proj_id = proj_info.get('resource_info') try: proj = self.keystone_event._service.projects.get(proj_id) except Exception: LOG.error("Failed to find project %s.", proj_id) return new_proj_name, new_dci_id = self._get_dci_id_and_proj_name(proj.name) # Check if project name and dci_id are the same, there is no change. orig_proj_name = self.get_project_name(proj_id) orig_dci_id = self.get_dci_id(proj_id) if orig_proj_name == new_proj_name and new_dci_id == orig_dci_id: # This is an invalid update event. LOG.warning('Project update event for %(proj)s is received ' 'without changing in the project name: ' '%(orig_proj)s. Ignoring the event.', {'proj': proj_id, 'orig_proj': orig_proj_name}) return if orig_proj_name != new_proj_name: # Project has new name and in DCNM the name of project cannot be # modified. It is an invalid update. Do not process the event. LOG.debug('Update request cannot be processed as name of project' ' is changed: %(proj)s %(orig_name)s %(orig_dci)s to ' '%(new_name)s %(new_dci)s.', ( {'proj': proj_id, 'orig_name': orig_proj_name, 'orig_dci': orig_dci_id, 'new_name': new_proj_name, 'new_dci': new_dci_id})) return # Valid update request. LOG.debug('Changing project DCI id for %(proj)s from %(orig_dci)s to ' '%(new_dci)s.', {'proj': proj_id, 'orig_dci': orig_dci_id, 'new_dci': new_dci_id}) try: self.dcnm_client.update_project(new_proj_name, self.cfg.dcnm. default_partition_name, dci_id=new_dci_id) except dexc.DfaClientRequestFailed: # Failed to update project in DCNM. # Save the info and mark it as failure and retry it later. LOG.error("Failed to update project %s on DCNM.", new_proj_name) self.update_project_info_cache(proj_id, name=new_proj_name, dci_id=new_dci_id, opcode='update', result=constants.UPDATE_FAIL) else: self.update_project_info_cache(proj_id, name=new_proj_name, dci_id=new_dci_id, opcode='update') LOG.debug('Updated project %(proj)s %(name)s.', {'proj': proj_id, 'name': proj.name})
def project_update_event(self, proj_info)
Process project update event. There could be change in project name. DCNM doesn't allow change in project (a.k.a tenant). This event may be received for the DCI update. If the change is for DCI, update the DCI portion of the project name and send the update event to the DCNM.
2.689179
2.579906
1.042356
LOG.debug("Processing project_delete_event...") proj_id = proj_info.get('resource_info') proj_name = self.get_project_name(proj_id) if proj_name: try: self.dcnm_client.delete_project(proj_name, self.cfg.dcnm. default_partition_name) except dexc.DfaClientRequestFailed: # Failed to delete project in DCNM. # Save the info and mark it as failure and retry it later. LOG.error("Failed to create project %s on DCNM.", proj_name) self.update_project_info_cache(proj_id, name=proj_name, opcode='delete', result=constants.DELETE_FAIL) else: self.update_project_info_cache(proj_id, opcode='delete') LOG.debug('Deleted project:%s', proj_name) self.project_delete_notif(proj_id, proj_name)
def project_delete_event(self, proj_info)
Process project delete event.
3.936342
3.807122
1.033942
subnet = subnet_info.get('subnet') if subnet: self.create_subnet(subnet) else: # Check whether request is for subnets. subnets = subnet_info.get('subnets') if subnets: for subnet in subnets: self.create_subnet(subnet)
def subnet_create_event(self, subnet_info)
Process subnet create event.
2.740707
2.55599
1.072268
snet_id = snet.get('id') # This checks if the source of the subnet creation is FW, # If yes, this event is ignored. if self.fw_api.is_subnet_source_fw(snet.get('tenant_id'), snet.get('cidr')): LOG.info("Service subnet %s, returning", snet.get('cidr')) return if snet_id not in self.subnet: self.subnet[snet_id] = {} self.subnet[snet_id].update(snet) net = self.network.get(self.subnet[snet_id].get('network_id')) if not net: LOG.error('Network %(network_id)s does not exist.', {'network_id': self.subnet[snet_id].get('network_id')}) return # Check if the network is created by DCNM. query_net = self.get_network(net.get('id')) if query_net.result != constants.SUBNET_PENDING: LOG.info("Subnet exists, returning") return if query_net and query_net.source.lower() == 'dcnm': # The network is created by DCNM. # No need to process this event. LOG.info('create_subnet: network %(name)s ' 'was created by DCNM. Ignoring processing the ' 'event.', {'name': query_net.name}) return tenant_name = self.get_project_name(snet['tenant_id']) subnet = utils.Dict2Obj(snet) dcnm_net = utils.Dict2Obj(net) if not tenant_name: LOG.error('Project %(tenant_id)s does not exist.', {'tenant_id': subnet.tenant_id}) self.update_network_db(dcnm_net.id, constants.CREATE_FAIL) return try: self.dcnm_client.create_network(tenant_name, dcnm_net, subnet, self.dcnm_dhcp) self.update_network_db(net.get('id'), constants.RESULT_SUCCESS) except dexc.DfaClientRequestFailed: LOG.exception('Failed to create network %(net)s.', {'net': dcnm_net.name}) # Update network database with failure result. self.update_network_db(dcnm_net.id, constants.CREATE_FAIL) self.network_sub_create_notif(snet.get('tenant_id'), tenant_name, snet.get('cidr'))
def create_subnet(self, snet)
Create subnet.
3.214269
3.204755
1.002969
return self.seg_drvr.allocate_segmentation_id(netid, seg_id=segid, source=source)
def _get_segmentation_id(self, netid, segid, source)
Allocate segmentation id.
7.29658
5.289641
1.379409
net_id = network_info['network_id'] if net_id not in self.network: LOG.error('network_delete_event: net_id %s does not exist.', net_id) return segid = self.network[net_id].get('segmentation_id') tenant_id = self.network[net_id].get('tenant_id') tenant_name = self.get_project_name(tenant_id) net = utils.Dict2Obj(self.network[net_id]) if not tenant_name: LOG.error('Project %(tenant_id)s does not exist.', {'tenant_id': tenant_id}) self.update_network_db(net.id, constants.DELETE_FAIL) return try: self.dcnm_client.delete_network(tenant_name, net) # Put back the segmentation id into the pool. self.seg_drvr.release_segmentation_id(segid) # Remove entry from database and cache. self.delete_network_db(net_id) del self.network[net_id] snets = [k for k in self.subnet if ( self.subnet[k].get('network_id') == net_id)] [self.subnet.pop(s) for s in snets] except dexc.DfaClientRequestFailed: LOG.error('Failed to create network %(net)s.', {'net': net.name}) self.update_network_db(net_id, constants.DELETE_FAIL) # deleting all related VMs instances = self.get_vms() instances_related = [k for k in instances if k.network_id == net_id] for vm in instances_related: LOG.debug("deleting vm %s because network is deleted", vm.name) self.delete_vm_function(vm.port_id, vm) self.network_del_notif(tenant_id, tenant_name, net_id)
def network_delete_event(self, network_info)
Process network delete event.
3.167462
3.119114
1.015501
seg_id = network_info.get('segmentation_id') if not seg_id: LOG.error('Failed to delete network. Invalid network ' 'info %s.', network_info) query_net = self.get_network_by_segid(seg_id) if not query_net: LOG.info('dcnm_network_delete_event: network %(segid)s ' 'does not exist.', {'segid': seg_id}) return if self.fw_api.is_network_source_fw(query_net, query_net.name): LOG.info("Service network %s, returning", query_net.name) return # Send network delete request to neutron try: del_net = self.network.pop(query_net.network_id) self.neutronclient.delete_network(query_net.network_id) self.delete_network_db(query_net.network_id) except Exception as exc: # Failed to delete network. # Put back the entry to the local cache??? self.network[query_net.network_id] = del_net LOG.exception('dcnm_network_delete_event: Failed to delete ' '%(network)s. Reason %(err)s.', {'network': query_net.name, 'err': str(exc)})
def dcnm_network_delete_event(self, network_info)
Process network delete event from DCNM.
3.338871
3.29964
1.011889
leases = None req = dict(ip='0.0.0.0') instances = self.get_vms_for_this_req(**req) if instances is None: return for vm in instances: if not leases: # For the first time finding the leases file. leases = self._get_ip_leases() if not leases: # File does not exist. return for line in leases: if line.startswith('lease') and line.endswith('{\n'): ip_addr = line.split()[1] if 'hardware ethernet' in line: if vm.mac == line.replace(';', '').split()[2]: LOG.info('Find IP address %(ip)s for %(mac)s', {'ip': ip_addr, 'mac': vm.mac}) try: rule_info = dict(ip=ip_addr, mac=vm.mac, port=vm.port_id, status='up') self.neutron_event.update_ip_rule(str(vm.host), str(rule_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to update" "rules.") else: params = dict(columns=dict(ip=ip_addr)) self.update_vm_db(vm.port_id, **params) # Send update to the agent. vm_info = dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=ip_addr, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco')) try: self.neutron_event.send_vm_info(vm.host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to ' 'agent.')
def update_port_ip_address(self)
Find the ip address that assinged to a port via DHCP The port database will be updated with the ip address.
3.787166
3.745615
1.011093
agent_host = vm_info.get('host') if not agent_host: LOG.info("vm/port is not bound to host, not sending vm info") return True try: self.neutron_event.send_vm_info(agent_host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): # Failed to send info to the agent. Keep the data in the # database as failure to send it later. LOG.error('Failed to send VM info to agent %s', agent_host) return False else: return True
def send_vm_info(self, vm_info)
Send vm info to the compute host. it will return True/False
4.596818
4.465583
1.029388
# This request is received from an agent when it runs for the first # time and uplink is detected. agent = payload.get('agent') LOG.debug('request_vms_info: Getting VMs info for %s', agent) req = dict(host=payload.get('agent')) instances = self.get_vms_for_this_req(**req) vm_info = [] for vm in instances: vm_info.append(dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=vm.ip, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco'))) try: self.neutron_event.send_vm_info(agent, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to agent.')
def request_vms_info(self, payload)
Get the VMs from the database and send the info to the agent.
4.541273
4.439182
1.022998
# This request is received from an agent when it run for the first # Send the uplink name (physical port name that connectes compute # node and switch fabric), agent = payload.get('agent') config_res = self.get_agent_configurations(agent) LOG.debug('configurations on %(agent)s is %(cfg)s', ( {'agent': agent, 'cfg': config_res})) try: self.neutron_event.send_msg_to_agent(agent, constants.UPLINK_NAME, config_res) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to send uplink name to agent.")
def request_uplink_info(self, payload)
Get the uplink from the database and send the info to the agent.
8.910966
8.670516
1.027732
# This request is received from CLI for setting ip address of an # instance. macaddr = payload.get('mac') ipaddr = payload.get('ip') # Find the entry associated with the mac in the database. req = dict(mac=macaddr) instances = self.get_vms_for_this_req(**req) for vm in instances: LOG.info('Updating IP address: %(ip)s %(mac)s.', {'ip': ipaddr, 'mac': macaddr}) # Send request to update the rule. try: rule_info = dict(ip=ipaddr, mac=macaddr, port=vm.port_id, status='up') self.neutron_event.update_ip_rule(str(vm.host), str(rule_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error("RPC error: Failed to update rules.") else: # Update the database. params = dict(columns=dict(ip=ipaddr)) self.update_vm_db(vm.port_id, **params) # Send update to the agent. vm_info = dict(status=vm.status, vm_mac=vm.mac, segmentation_id=vm.segmentation_id, host=vm.host, port_uuid=vm.port_id, net_uuid=vm.network_id, oui=dict(ip_addr=ipaddr, vm_name=vm.name, vm_uuid=vm.instance_id, gw_mac=vm.gw_mac, fwd_mod=vm.fwd_mod, oui_id='cisco')) try: self.neutron_event.send_vm_info(vm.host, str(vm_info)) except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError): LOG.error('Failed to send VM info to agent.')
def set_static_ip_address(self, payload)
Set static ip address for a VM.
3.824941
3.816087
1.00232
port_id = payload.get('port_id') result = payload.get('result') if port_id and result: # Update the VM's result field. params = dict(columns=dict(result=result)) self.update_vm_db(port_id, **params)
def vm_result_update(self, payload)
Update the result field in VM database. This request comes from an agent that needs to update the result in VM database to success or failure to reflect the operation's result in the agent.
3.92021
3.596248
1.090083
port_info = self.neutronclient.show_port(port_id) port = port_info.get('port') if not port: LOG.error("Can not retrieve port info for port %s" % port_id) return LOG.debug("lbaas add port, %s", port) if not port['binding:host_id']: LOG.info("No host bind for lbaas port, octavia case") return port["device_id"] = lb_id vm_info = self._make_vm_info(port, 'up', constants.LBAAS_PREFIX) self.port[port_id] = vm_info if self.send_vm_info(vm_info): self.add_vms_db(vm_info, constants.RESULT_SUCCESS) else: self.add_vms_db(vm_info, constants.CREATE_FAIL)
def add_lbaas_port(self, port_id, lb_id)
Give port id, get port info and send vm info to agent. :param port_id: port id of vip port :param lb_id: vip id for v1 and lbaas_id for v2
3.88429
3.949587
0.983467
lb_id = lb_id.replace('-', '') req = dict(instance_id=lb_id) instances = self.get_vms_for_this_req(**req) for vm in instances: LOG.info("deleting lbaas vm %s " % vm.name) self.delete_vm_function(vm.port_id, vm)
def delete_lbaas_port(self, lb_id)
send vm down event and delete db. :param lb_id: vip id for v1 and lbaas_id for v2
4.960404
5.05713
0.980873
vip_data = vip_info.get('vip') port_id = vip_data.get('port_id') vip_id = vip_data.get('id') self.add_lbaas_port(port_id, vip_id)
def vip_create_event(self, vip_info)
Process vip create event.
2.549826
2.418076
1.054485
listener_data = listener_info.get('listener') lb_list = listener_data.get('loadbalancers') for lb in lb_list: lb_id = lb.get('id') req = dict(instance_id=(lb_id.replace('-', ''))) instances = self.get_vms_for_this_req(**req) if not instances: lb_info = self.neutronclient.show_loadbalancer(lb_id) if lb_info: port_id = lb_info["loadbalancer"]["vip_port_id"] self.add_lbaas_port(port_id, lb_id) else: LOG.info("lbaas port for lb %s already added" % lb_id)
def listener_create_event(self, listener_info)
Process listener create event. This is lbaas v2 vif will be plugged into ovs when first listener is created and unpluged from ovs when last listener is deleted
3.321176
3.152537
1.053493
lb_list = self.neutronclient.list_loadbalancers() for lb in lb_list.get('loadbalancers'): if not lb.get("listeners"): lb_id = lb.get('id') LOG.info("Deleting lb %s port" % lb_id) self.delete_lbaas_port(lb_id)
def listener_delete_event(self, listener_info)
Process listener delete event. This is lbaas v2 vif will be plugged into ovs when first listener is created and unpluged from ovs when last listener is deleted. as the data only contains listener id, we will scan all loadbalancers from db and delete the vdp if their admin state is down in that loadbalancer
3.469385
2.996331
1.157878
pool_data = pool_info.get('pool') listeners = pool_data.get('listeners') for listener in listeners: l_id = listener.get('id') l_info = self.neutronclient.show_listener(l_id) self.listener_create_event(l_info)
def pool_create_event(self, pool_info)
Process pool create event. Extract pool info and get listener info and call next listen_create_event
2.656656
2.698029
0.984665
p = self.keystone_event._service.projects.list() for proj in p: if proj.name in not_create_project_name: continue LOG.info("Syncing project %s" % proj.name) self.project_create_func(proj.id, proj=proj)
def sync_projects(self)
Sync projects. This function will retrieve project from keystone and populate them dfa database and dcnm
6.385653
6.125551
1.042462
nets = self.neutronclient.list_networks() for net in nets.get("networks"): LOG.info("Syncing network %s", net["id"]) self.network_create_func(net) subnets = self.neutronclient.list_subnets() for subnet in subnets.get("subnets"): LOG.info("Syncing subnet %s", subnet["id"]) self.create_subnet(subnet)
def sync_networks(self)
sync networks. It will retrieve networks from neutron and populate them in dfa database and dcnm
2.325157
2.26218
1.027839
# Create thread for neutron notifications. neutron_thrd = utils.EventProcessingThread('Neutron_Event', self.neutron_event, 'event_handler', self._excpq) self.dfa_threads.append(neutron_thrd) # Create thread for processing notification events. qp_thrd = utils.EventProcessingThread('Event_Queue', self, 'process_queue', self._excpq) self.dfa_threads.append(qp_thrd) # Create thread for keystone notifications. keys_thrd = utils.EventProcessingThread('Keystone_Event', self.keystone_event, 'event_handler', self._excpq) self.dfa_threads.append(keys_thrd) # Create thread to process RPC calls. hb_thrd = utils.EventProcessingThread('RPC_Server', self, 'start_rpc', self._excpq) self.dfa_threads.append(hb_thrd) # Create thread to listen to dcnm network events. if self.dcnm_event is not None: dcnmL_thrd = utils.EventProcessingThread('DcnmListener', self.dcnm_event, 'process_amqp_msgs', self._excpq) self.dfa_threads.append(dcnmL_thrd) # Create periodic task to process failure cases in create/delete # networks and projects. fr_thrd = utils.PeriodicTask(interval=constants.FAIL_REC_INTERVAL, func=self.add_events, event_queue=self.pqueue, priority=self.PRI_LOW_START + 10, excq=self._excpq) # Start all the threads. for t in self.dfa_threads: t.start() # Run the periodic tasks. fr_thrd.run()
def create_threads(self)
Create threads on server.
3.569306
3.541071
1.007973
try: with session.begin(subtransactions=True): alloc = (session.query(self.model).filter_by( segmentation_id=seg_id).first()) if alloc: if alloc.allocated: # Segment already allocated return else: # Segment not allocated count = (session.query(self.model). filter_by(allocated=False, segmentation_id=seg_id).update( {"allocated": True})) if count: return alloc # Segment to create or already allocated alloc = self.model(segmentation_id=seg_id, allocated=True, source=source) session.add(alloc) except db_exc.DBDuplicateEntry: # Segment already allocated (insert failure) alloc = None return alloc
def _allocate_specified_segment(self, session, seg_id, source)
Allocate specified segment. If segment exists, then try to allocate it and return db object If segment does not exists, then try to create it and return db object If allocation/creation failed (duplicates), then return None
2.692677
2.564175
1.050114
with session.begin(subtransactions=True): hour_lapse = utils.utc_time_lapse(self.seg_timeout) count = (session.query(self.model).filter( self.model.delete_time < hour_lapse).update( {"delete_time": None})) select = (session.query(self.model).filter_by(allocated=False, delete_time=None)) # Selected segment can be allocated before update by someone else, # We retry until update success or DB_MAX_RETRIES retries for attempt in range(DB_MAX_RETRIES + 1): alloc = select.first() if not alloc: LOG.info("No segment resource available") # No resource available return count = (session.query(self.model). filter_by(segmentation_id=alloc.segmentation_id, allocated=False).update({"allocated": True, "network_id": net_id, "source": source})) if count: return alloc LOG.error("ERROR: Failed to allocate segment for net %(net)s" " source %(src)s", {'net': net_id, 'src': source})
def _allocate_segment(self, session, net_id, source)
Allocate segment from pool. Return allocated db object or None.
4.3194
4.196513
1.029283
session = db.get_session() query_str = None for sub in subnet_lst: sub_que = (self.model.subnet_address != sub) if query_str is not None: query_str = query_str & sub_que else: query_str = sub_que with session.begin(subtransactions=True): select = (session.query(self.model).filter( (self.model.allocated == 0) & query_str)) # Selected segment can be allocated before update by someone else, # We retry until update success or DB_MAX_RETRIES retries for attempt in range(DB_MAX_RETRIES + 1): alloc = select.first() if not alloc: LOG.info("No subnet resource available") return count = (session.query(self.model). filter_by(subnet_address=alloc.subnet_address, allocated=False).update({"allocated": True, "network_id": net_id})) if count: return alloc.subnet_address LOG.error("ERROR: Failed to allocate subnet for net %(net)s", {'net': net_id}) return None
def allocate_subnet(self, subnet_lst, net_id=None)
Allocate subnet from pool. Return allocated db object or None.
3.84764
3.766247
1.021611
topo_dict = params.get('columns') session = db.get_session() host = topo_dict.get('host') protocol_interface = topo_dict.get('protocol_interface') with session.begin(subtransactions=True): try: # Check if entry exists. session.query(DfaTopologyDb).filter_by( host=host, protocol_interface=protocol_interface).one() session.query(DfaTopologyDb).filter_by( host=host, protocol_interface=protocol_interface).update( topo_dict) except orm_exc.NoResultFound: LOG.info("Creating new topology entry for host " "%(host)s on Interface %(intf)s", {'host': host, 'intf': protocol_interface}) topo_disc = DfaTopologyDb( host=host, protocol_interface=protocol_interface, phy_interface=topo_dict.get('phy_interface'), created=topo_dict.get('created'), heartbeat=topo_dict.get('heartbeat'), remote_mgmt_addr=topo_dict.get('remote_mgmt_addr'), remote_system_name=topo_dict.get('remote_system_name'), remote_system_desc=topo_dict.get('remote_system_desc'), remote_port_id_mac=topo_dict.get('remote_port_id_mac'), remote_chassis_id_mac=topo_dict.get( 'remote_chassis_id_mac'), remote_port=topo_dict.get('remote_port'), remote_evb_cfgd=topo_dict.get('remote_evb_cfgd'), remote_evb_mode=topo_dict.get('remote_evb_mode'), configurations=topo_dict.get('configurations')) session.add(topo_disc) except orm_exc.MultipleResultsFound: LOG.error("More than one enty found for agent %(host)s." "Interface %(intf)s", {'host': host, 'intf': protocol_interface}) except Exception as exc: LOG.error("Exception in add_update_topology_db %s", exc)
def add_update_topology_db(self, **params)
Add or update an entry to the topology DB.
2.242071
2.226054
1.007195
topo_lst = [] for topo_obj in topology_objs: topo_dct = { 'host': topo_obj.host, 'protocol_interface': topo_obj.protocol_interface, 'phy_interface': topo_obj.phy_interface, 'created': topo_obj.created, 'heartbeat': topo_obj.heartbeat, 'remote_mgmt_addr': topo_obj.remote_mgmt_addr, 'remote_system_name': topo_obj.remote_system_name, 'remote_system_desc': topo_obj.remote_system_desc, 'remote_port_id_mac': topo_obj.remote_port_id_mac, 'remote_chassis_id_mac': topo_obj.remote_chassis_id_mac, 'remote_port': topo_obj.remote_port, 'remote_evb_cfgd': topo_obj.remote_evb_cfgd, 'remote_evb_mode': topo_obj.remote_evb_mode, 'configurations': topo_obj.configurations} topo_lst.append(topo_dct) return topo_lst
def _convert_topo_obj_dict(self, topology_objs)
Convert topology object to dict.
2.230535
2.171726
1.027079
session = db.get_session() with session.begin(subtransactions=True): try: # Check if entry exists. topo_disc = session.query(DfaTopologyDb).filter_by(**req).all() except orm_exc.NoResultFound: LOG.info("No Topology results found for %s", req) return None if dict_convert: return self._convert_topo_obj_dict(topo_disc) return topo_disc
def query_topology_db(self, dict_convert=False, **req)
Query an entry to the topology DB.
4.087077
3.960524
1.031954
session = db.get_session() with session.begin(subtransactions=True): try: rows = session.query(DfaTopologyDb).filter_by(**req).all() except orm_exc.NoResultFound: LOG.info("No Topology results found for %s", req) return try: for row in rows: session.delete(row) except Exception as exc: LOG.error("Exception raised %s", str(exc))
def delete_topology_entry(self, **req)
Delete the entries from the topology DB.
3.217366
2.982272
1.07883
reply = None if is_ncb: reply = self.run_lldptool(["-L", "-i", port_name, "-g", "ncb", "adminStatus=rxtx"]) elif is_nb: reply = self.run_lldptool(["-L", "-i", port_name, "-g", "nb", "adminStatus=rxtx"]) else: LOG.error("Both NCB and NB are not selected to " "enable LLDP") return False if reply is None: return False exp_str = "adminstatus=rxtx" if exp_str in reply.replace(" ", "").lower(): return True else: return False
def enable_lldp(self, port_name, is_ncb=True, is_nb=False)
Function to enable LLDP on the interface.
3.149725
3.183693
0.989331
reply = None if is_ncb: reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name, "-g", "ncb"]) elif is_nb: reply = self.run_lldptool(["get-tlv", "-n", "-i", port_name, "-g", "nb"]) else: LOG.error("Both NCB and NB are not selected to " "query LLDP") return reply
def get_lldp_tlv(self, port_name, is_ncb=True, is_nb=False)
Function to Query LLDP TLV on the interface.
3.00758
3.002497
1.001693
full_args = ['lldptool'] + args try: return utils.execute(full_args, root_helper=self.root_helper) except Exception as exc: LOG.error("Unable to execute %(cmd)s. " "Exception: %(exception)s", {'cmd': full_args, 'exception': str(exc)})
def run_lldptool(self, args)
Function for invoking the lldptool utility.
2.704029
2.628435
1.02876
if tlv_complete_data is None: return False, None tlv_string_split = tlv_complete_data.split(tlv_string) if len(tlv_string_split) < 2: return False, None next_tlv_list = tlv_string_split[1].split('TLV')[0] tlv_val_set = next_tlv_list.split(tlv_data_pattern) if len(tlv_val_set) < 2: return False, None return True, tlv_val_set
def _check_common_tlv_format(self, tlv_complete_data, tlv_data_pattern, tlv_string)
Check for the common TLV format.
2.086128
2.029213
1.028048
ret, parsed_val = self._check_common_tlv_format( tlv_data, "mode:", "EVB Configuration TLV") if not ret: return None mode_val = parsed_val[1].split()[0].strip() return mode_val
def get_remote_evb_mode(self, tlv_data)
Returns the EVB mode in the TLV.
7.008896
6.559681
1.068481
ret, parsed_val = self._check_common_tlv_format( tlv_data, "IPv4:", "Management Address TLV") if not ret: return None addr_fam = 'IPv4:' addr = parsed_val[1].split('\n')[0].strip() return addr_fam + addr
def get_remote_mgmt_addr(self, tlv_data)
Returns Remote Mgmt Addr from the TLV.
6.51298
6.386735
1.019767
ret, parsed_val = self._check_common_tlv_format( tlv_data, "\n", "System Description TLV") if not ret: return None return parsed_val[1].strip()
def get_remote_sys_desc(self, tlv_data)
Returns Remote Sys Desc from the TLV.
7.326642
7.16103
1.023127
ret, parsed_val = self._check_common_tlv_format( tlv_data, "\n", "System Name TLV") if not ret: return None return parsed_val[1].strip()
def get_remote_sys_name(self, tlv_data)
Returns Remote Sys Name from the TLV.
7.392744
7.114924
1.039047
ret, parsed_val = self._check_common_tlv_format( tlv_data, "\n", "Port Description TLV") if not ret: return None return parsed_val[1].strip()
def get_remote_port(self, tlv_data)
Returns Remote Port from the TLV.
8.331979
8.014379
1.039629
ret, parsed_val = self._check_common_tlv_format( tlv_data, "MAC:", "Chassis ID TLV") if not ret: return None mac = parsed_val[1].split('\n') return mac[0].strip()
def get_remote_chassis_id_mac(self, tlv_data)
Returns Remote Chassis ID MAC from the TLV.
6.495274
6.166464
1.053322
ret, parsed_val = self._check_common_tlv_format( tlv_data, "Local:", "Port ID TLV") if not ret: return None local = parsed_val[1].split('\n') return local[0].strip()
def get_remote_port_id_local(self, tlv_data)
Returns Remote Port ID Local from the TLV.
7.317631
6.974508
1.049197
if ch_grp > 0: return 'port-channel:%s' % str(ch_grp) return '%s:%s' % (intf_type.lower(), port)
def format_interface_name(intf_type, port, ch_grp=0)
Method to format interface name given type, port. Given interface type, port, and channel-group, this method formats an interface name. If channel-group is non-zero, then port-channel is configured. :param intf_type: Such as 'ethernet' or 'port-channel' :param port: unique identification -- 1/32 or 1 :ch_grp: If non-zero, ignore other params and format port-channel<ch_grp> :returns: the full formatted interface name. ex: ethernet:1/32, port-channel:1
3.505946
3.262087
1.074755
interface = interface.lower() if ch_grp != 0: intf_type = 'port-channel' port = str(ch_grp) elif ':' in interface: intf_type, port = interface.split(':') elif interface.startswith('ethernet'): interface = interface.replace(" ", "") _, intf_type, port = interface.partition('ethernet') elif interface.startswith('port-channel'): interface = interface.replace(" ", "") _, intf_type, port = interface.partition('port-channel') else: intf_type, port = 'ethernet', interface return intf_type, port
def split_interface_name(interface, ch_grp=0)
Method to split interface type, id from name. Takes an interface name or just interface suffix and returns interface type and number separately. :param interface: interface name or just suffix :param ch_grp: if non-zero, ignore interface name and return 'port-channel' grp :returns: interface type like 'ethernet' :returns: returns suffix to interface name
2.289144
2.306374
0.992529
LOG.debug('Notify Cisco cfg agent at %(host)s the message ' '%(method)s', {'host': host, 'method': method}) cctxt = self.client.prepare(server=host) cctxt.cast(context, method, payload=payload)
def _host_notification(self, context, method, payload, host)
Notify the cfg agent that is handling the hosting device.
3.632046
3.367791
1.078465
admin_context = context.is_admin and context or context.elevated() for hosting_device in hosting_devices: agents = self._dmplugin.get_cfg_agents_for_hosting_devices( admin_context, hosting_device['id'], admin_state_up=True, schedule=True) for agent in agents: LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the ' 'message %(method)s', {'agent_type': agent.agent_type, 'topic': agent.topic, 'host': agent.host, 'method': method}) cctxt = self.client.prepare(server=agent.host) cctxt.cast(context, method)
def _agent_notification(self, context, method, hosting_devices, operation)
Notify individual Cisco cfg agents.
3.174487
2.950423
1.075943
self._host_notification(context, 'agent_updated', {'admin_state_up': admin_state_up}, host)
def agent_updated(self, context, admin_state_up, host)
Updates cfg agent on <host> to enable or disable it.
3.766264
3.918222
0.961218
self._host_notification(context, 'hosting_devices_unassigned_from_cfg_agent', {'hosting_device_ids': ids}, host)
def hosting_devices_unassigned_from_cfg_agent(self, context, ids, host)
Notify cfg agent to no longer handle some hosting devices. This notification relieves the cfg agent in <host> of responsibility to monitor and configure hosting devices with id specified in <ids>.
4.010576
3.774185
1.062634
self._host_notification(context, 'hosting_devices_assigned_to_cfg_agent', {'hosting_device_ids': ids}, host)
def hosting_devices_assigned_to_cfg_agent(self, context, ids, host)
Notify cfg agent to now handle some hosting devices. This notification relieves the cfg agent in <host> of responsibility to monitor and configure hosting devices with id specified in <ids>.
4.237405
3.983062
1.063856
if hosting_data: self._host_notification(context, 'hosting_devices_removed', {'hosting_data': hosting_data, 'deconfigure': deconfigure}, host)
def hosting_devices_removed(self, context, hosting_data, deconfigure, host)
Notify cfg agent that some hosting devices have been removed. This notification informs the cfg agent in <host> that the hosting devices in the <hosting_data> dictionary have been removed from the hosting device pool. The <hosting_data> dictionary also contains the ids of the affected logical resources for each hosting devices:: {'hd_id1': {'routers': [id1, id2, ...], 'fw': [id1, ...], ...}, 'hd_id2': {'routers': [id3, id4, ...]}, 'fw': [id1, ...], ...}, ...} The <deconfigure> argument is True if any configurations for the logical resources should be removed from the hosting devices
3.441469
4.353104
0.790578
admin_context = context.is_admin and context or context.elevated() agents = self._dmplugin.get_cfg_agents_for_hosting_devices( admin_context, [id], admin_state_up=True, schedule=True) if agents: cctxt = self.client.prepare(server=agents[0].host) return cctxt.call(context, 'get_hosting_device_configuration', payload={'hosting_device_id': id})
def get_hosting_device_configuration(self, context, id)
Fetch configuration of hosting device with id. The configuration agent should respond with the running config of the hosting device.
3.554703
3.912704
0.908503
if pol_id not in self.policies: self.policies[pol_id] = policy self.policy_cnt += 1
def store_policy(self, pol_id, policy)
Store the policy. Policy is maintained as a dictionary of pol ID.
2.884691
2.988808
0.965164
if rule_id not in self.rules: self.rules[rule_id] = rule self.rule_cnt += 1
def store_rule(self, rule_id, rule)
Store the rules. Policy is maintained as a dictionary of Rule ID.
2.940009
2.989548
0.983429
if rule_id not in self.rules: LOG.error("No Rule id present for deleting %s", rule_id) return del self.rules[rule_id] self.rule_cnt -= 1
def delete_rule(self, rule_id)
Delete the specific Rule from dictionary indexed by rule id.
4.108333
3.602315
1.14047
if rule_id not in self.rules: LOG.error("Rule ID not present %s", rule_id) return self.rules[rule_id].update(rule)
def rule_update(self, rule_id, rule)
Update the rule.
3.218896
3.125639
1.029836
if self.fw_id is None or self.fw_id != fw_id: return False else: return True
def is_fw_present(self, fw_id)
Returns if firewall index by ID is present in dictionary.
3.327025
2.901808
1.146535
self.tenant_name = proj_name self.fw_id = fw_id self.fw_name = fw_name self.fw_created = True self.active_pol_id = pol_id self.fw_type = fw_type self.router_id = rtr_id
def create_fw(self, proj_name, pol_id, fw_id, fw_name, fw_type, rtr_id)
Fills up the local attributes when FW is created.
2.403165
2.16612
1.109433
self.fw_id = None self.fw_name = None self.fw_created = False self.active_pol_id = None
def delete_fw(self, fw_id)
Deletes the FW local attributes.
5.283417
5.33148
0.990985
if pol_id not in self.policies: LOG.error("Invalid policy %s", pol_id) return del self.policies[pol_id] self.policy_cnt -= 1
def delete_policy(self, pol_id)
Deletes the policy from the local dictionary.
3.126326
2.94687
1.060897
LOG.info("In fw_complete needed %(fw_created)s " "%(active_policy_id)s %(is_fw_drvr_created)s " "%(pol_present)s %(fw_type)s", {'fw_created': self.fw_created, 'active_policy_id': self.active_pol_id, 'is_fw_drvr_created': self.is_fw_drvr_created(), 'pol_present': self.active_pol_id in self.policies, 'fw_type': self.fw_type}) if self.active_pol_id is not None: LOG.info("In Drvr create needed %(len_policy)s %(one_rule)s", {'len_policy': len(self.policies[self.active_pol_id]['rule_dict']), 'one_rule': self.one_rule_present(self.active_pol_id)}) return self.fw_created and self.active_pol_id and ( self.is_fw_drvr_created()) and self.fw_type and ( self.active_pol_id in self.policies) and ( len(self.policies[self.active_pol_id]['rule_dict'])) > 0 and ( self.one_rule_present(self.active_pol_id))
def is_fw_complete(self)
This API returns the completion status of FW. This returns True if a FW is created with a active policy that has more than one rule associated with it and if a driver init is done successfully.
2.701079
2.498134
1.081238
pol_dict = self.policies[pol_id] for rule in pol_dict['rule_dict']: if self.is_rule_present(rule): return True return False
def one_rule_present(self, pol_id)
Returns if atleast one rule is present in the policy.
3.188031
2.934997
1.086213
fw_dict = {} if self.fw_id is None: return fw_dict fw_dict = {'rules': {}, 'tenant_name': self.tenant_name, 'tenant_id': self.tenant_id, 'fw_id': self.fw_id, 'fw_name': self.fw_name, 'firewall_policy_id': self.active_pol_id, 'fw_type': self.fw_type, 'router_id': self.router_id} # When Firewall and Policy are both deleted and the SM is doing a # retry (maybe DCNM Out partition could not be deleted) during # which without this check, it throws an exception since # self.policies is empty. This is also an issue during restart. if self.active_pol_id not in self.policies: return fw_dict pol_dict = self.policies[self.active_pol_id] for rule in pol_dict['rule_dict']: fw_dict['rules'][rule] = self.rules[rule] return fw_dict
def get_fw_dict(self)
This API creates a FW dictionary from the local attributes.
4.605667
4.480895
1.027845
if rtr_id != -1: self.router_id = rtr_id if fw_type != -1: self.fw_type = fw_type
def update_fw_params(self, rtr_id=-1, fw_type=-1)
Updates the FW parameters.
1.880071
1.830534
1.027061
if not self.fw_init: return self.dcnm_obj = dcnm_obj self.fabric.store_dcnm(dcnm_obj) self.populate_dcnm_obj(dcnm_obj)
def populate_cfg_dcnm(self, cfg, dcnm_obj)
This routine stores the DCNM object.
4.866612
4.166825
1.167943
if not self.fw_init: return self.que_obj = que_obj self.populate_event_que(que_obj)
def populate_event_queue(self, cfg, que_obj)
This routine is for storing the Event Queue obj.
6.051441
5.223993
1.158394
if not self.fw_init: return self.network_create_notif(tenant_id, tenant_name, cidr)
def network_sub_create_notif(self, tenant_id, tenant_name, cidr)
Network create notification.
4.631471
4.182916
1.107235
if not self.fw_init: return self.network_delete_notif(tenant_id, tenant_name, net_id)
def network_del_notif(self, tenant_id, tenant_name, net_id)
Network delete notification.
4.624939
4.266949
1.083898
if not self.fw_init: return self.os_helper.create_router('_'.join([fw_constants.TENANT_EDGE_RTR, tenant_name]), tenant_id, [])
def project_create_notif(self, tenant_id, tenant_name)
Tenant Create notification.
12.503172
12.154103
1.02872
if not self.fw_init: return rtr_name = '_'.join([fw_constants.TENANT_EDGE_RTR, tenant_name]) self.os_helper.delete_router_by_name(rtr_name, tenant_id)
def project_delete_notif(self, tenant_id, tenant_name)
Tenant Delete notification.
6.081144
6.0696
1.001902
is_fw_virt = self.is_device_virtual() ret = self.fabric.prepare_fabric_fw(tenant_id, fw_dict, is_fw_virt, fw_constants.RESULT_FW_CREATE_INIT) if not ret: LOG.error("Prepare Fabric failed") return else: self.update_fw_db_final_result(fw_dict.get('fw_id'), ( fw_constants.RESULT_FW_CREATE_DONE)) ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict) if ret: self.fwid_attr[tenant_id].fw_drvr_created(True) self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS') LOG.info("FW device create returned success for tenant %s", tenant_id) else: LOG.error("FW device create returned failure for tenant %s", tenant_id)
def _create_fw_fab_dev_te(self, tenant_id, drvr_name, fw_dict)
Prepares the Fabric and configures the device. This routine calls the fabric class to prepare the fabric when a firewall is created. It also calls the device manager to configure the device. It updates the database with the final result.
4.043329
3.638544
1.111249
if fw_dict.get('fw_type') == fw_constants.FW_TENANT_EDGE: self._create_fw_fab_dev_te(tenant_id, drvr_name, fw_dict)
def _create_fw_fab_dev(self, tenant_id, drvr_name, fw_dict)
This routine calls the Tenant Edge routine if FW Type is TE.
3.182695
2.184443
1.456982
if self.fwid_attr[tenant_id].is_fw_drvr_create_needed(): fw_dict = self.fwid_attr[tenant_id].get_fw_dict() try: with self.fwid_attr[tenant_id].mutex_lock: ret = self.add_fw_db(fw_dict.get('fw_id'), fw_dict, fw_constants.RESULT_FW_CREATE_INIT) if not ret: LOG.error("Adding FW DB failed for tenant %s", tenant_id) return self._create_fw_fab_dev(tenant_id, drvr_name, fw_dict) except Exception as exc: LOG.error("Exception raised in create fw %s", str(exc))
def _check_create_fw(self, tenant_id, drvr_name)
Creates the Firewall, if all conditions are met. This function first checks if all the configuration are done for a FW to be launched. After that it creates the FW entry in the DB. After that, it calls the routine to prepare the fabric and configure the device.
3.850203
3.791516
1.015478
is_fw_virt = self.is_device_virtual() if self.fwid_attr[tenant_id].is_fw_drvr_created(): ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict) if not ret: LOG.error("Error in delete_fabric_fw device for tenant " "%s", tenant_id) return False else: self.fwid_attr[tenant_id].fw_drvr_created(False) self.update_fw_db_dev_status(fw_dict.get('fw_id'), '') ret = self.fabric.delete_fabric_fw(tenant_id, fw_dict, is_fw_virt, fw_constants.RESULT_FW_DELETE_INIT) if not ret: LOG.error("Error in delete_fabric_fw for tenant %s", tenant_id) return False self.update_fw_db_final_result(fw_dict.get('fw_id'), ( fw_constants.RESULT_FW_DELETE_DONE)) self.delete_fw(fw_dict.get('fw_id')) return True
def _delete_fw_fab_dev(self, tenant_id, drvr_name, fw_dict)
Deletes the Firewall. This routine calls the fabric class to delete the fabric when a firewall is deleted. It also calls the device manager to unconfigure the device. It updates the database with the final result.
3.172913
3.059928
1.036924
fw_dict = self.fwid_attr[tenant_id].get_fw_dict() ret = False try: with self.fwid_attr[tenant_id].mutex_lock: self.update_fw_db_final_result(fw_dict.get('fw_id'), ( fw_constants.RESULT_FW_DELETE_INIT)) ret = self._delete_fw_fab_dev(tenant_id, drvr_name, fw_dict) except Exception as exc: LOG.error("Exception raised in delete fw %s", str(exc)) return ret
def _check_delete_fw(self, tenant_id, drvr_name)
Deletes the Firewall, if all conditioms are met. This function after modifying the DB with delete operation status, calls the routine to remove the fabric cfg from DB and unconfigure the device.
4.6602
4.490486
1.037794
if self.fwid_attr[tenant_id].is_fw_complete(): fw_dict = self.fwid_attr[tenant_id].get_fw_dict() self.modify_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict)
def _check_update_fw(self, tenant_id, drvr_name)
Update the Firewall config by calling the driver. This function calls the device manager routine to update the device with modified FW cfg.
4.340162
4.605569
0.942373
fw = data.get('firewall') tenant_id = fw.get('tenant_id') fw_name = fw.get('name') fw_id = fw.get('id') fw_pol_id = fw.get('firewall_policy_id') admin_state = fw.get('admin_state_up') rtr_id = None if 'router_ids' in fw and len(fw.get('router_ids')) != 0: rtr_id = fw.get('router_ids')[0] if not admin_state: LOG.debug("Admin state disabled") return name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id) rtr_name = '_'.join([fw_constants.TENANT_EDGE_RTR, name]) fw_rtr_name = self.os_helper.get_rtr_name(rtr_id) fw_type = None if fw_rtr_name == rtr_name: fw_type = fw_constants.FW_TENANT_EDGE if tenant_id not in self.fwid_attr: self.fwid_attr[tenant_id] = FwMapAttr(tenant_id) tenant_obj = self.fwid_attr[tenant_id] tenant_obj.create_fw(name, fw_pol_id, fw_id, fw_name, fw_type, rtr_id) self.tenant_db.store_fw_tenant(fw_id, tenant_id) if not cache: self._check_create_fw(tenant_id, drvr_name) if fw_pol_id is not None and not ( tenant_obj.is_policy_present(fw_pol_id)): pol_data = self.os_helper.get_fw_policy(fw_pol_id) if pol_data is not None: self.fw_policy_create(pol_data, cache=cache)
def _fw_create(self, drvr_name, data, cache)
Firewall create routine. This function updates its local cache with FW parameters. It checks if local cache has information about the Policy associated with the FW. If not, it means a restart has happened. It retrieves the policy associated with the FW by calling Openstack API's and calls t he policy create internal routine.
2.946405
2.900409
1.015859
LOG.debug("FW create %s", data) try: self._fw_create(fw_name, data, cache) except Exception as exc: LOG.error("Exception in fw_create %s", str(exc))
def fw_create(self, data, fw_name=None, cache=False)
Top level FW create function.
3.018234
2.982442
1.012001
fw = data.get('firewall') tenant_id = fw.get('tenant_id') if self.fwid_attr[tenant_id].is_fw_complete() or \ self.fwid_attr[tenant_id].is_fw_drvr_create_needed(): prev_info_complete = True else: prev_info_complete = False tenant_obj = self.fwid_attr[tenant_id] if 'router_ids' in fw and len(fw.get('router_ids')) != 0: rtr_id = fw.get('router_ids')[0] name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id) rtr_name = '_'.join([fw_constants.TENANT_EDGE_RTR, name]) fw_rtr_name = self.os_helper.get_rtr_name(rtr_id) fw_type = None if fw_rtr_name == rtr_name: fw_type = fw_constants.FW_TENANT_EDGE tenant_obj.update_fw_params(rtr_id, fw_type) if not prev_info_complete: self._check_create_fw(tenant_id, drvr_name)
def _fw_update(self, drvr_name, data)
Update routine for the Firewall. Check if FW is already cfgd using the below function if self.fwid_attr[tenant_id].is_fw_complete() or is_fw_drvr_create_needed(): The above two functions will take care of whether FW is already cfgd or about to be cfgd in case of error. If yes, this may be a change in policies attached to FW. If no, do a check, create after storing the parameters like rtr_id.
3.785465
2.909294
1.301163
LOG.debug("FW Update %s", data) self._fw_update(fw_name, data)
def fw_update(self, data, fw_name=None)
Top level FW update function.
5.469037
5.457367
1.002138
fw_id = data.get('firewall_id') tenant_id = self.tenant_db.get_fw_tenant(fw_id) if tenant_id not in self.fwid_attr: LOG.error("Invalid tenant id for FW delete %s", tenant_id) return tenant_obj = self.fwid_attr[tenant_id] ret = self._check_delete_fw(tenant_id, drvr_name) if ret: tenant_obj.delete_fw(fw_id) self.tenant_db.del_fw_tenant(fw_id)
def _fw_delete(self, drvr_name, data)
Firewall Delete routine. This function calls routines to remove FW from fabric and device. It also updates its local cache.
2.917564
2.891271
1.009094
fw_rule = data.get('firewall_rule') rule = {'protocol': fw_rule.get('protocol'), 'source_ip_address': fw_rule.get('source_ip_address'), 'destination_ip_address': fw_rule.get( 'destination_ip_address'), 'source_port': fw_rule.get('source_port'), 'destination_port': fw_rule.get('destination_port'), 'action': fw_rule.get('action'), 'enabled': fw_rule.get('enabled'), 'name': fw_rule.get('name')} return rule
def _fw_rule_decode_store(self, data)
Misc function to decode the firewall rule from Openstack.
1.546688
1.488199
1.039302
tenant_id = data.get('firewall_rule').get('tenant_id') fw_rule = data.get('firewall_rule') rule = self._fw_rule_decode_store(data) fw_pol_id = fw_rule.get('firewall_policy_id') rule_id = fw_rule.get('id') if tenant_id not in self.fwid_attr: self.fwid_attr[tenant_id] = FwMapAttr(tenant_id) self.fwid_attr[tenant_id].store_rule(rule_id, rule) if not cache: self._check_create_fw(tenant_id, drvr_name) self.tenant_db.store_rule_tenant(rule_id, tenant_id) if fw_pol_id is not None and not ( self.fwid_attr[tenant_id].is_policy_present(fw_pol_id)): pol_data = self.os_helper.get_fw_policy(fw_pol_id) if pol_data is not None: self.fw_policy_create(pol_data, cache=cache)
def _fw_rule_create(self, drvr_name, data, cache)
Firewall Rule create routine. This function updates its local cache with rule parameters. It checks if local cache has information about the Policy associated with the rule. If not, it means a restart has happened. It retrieves the policy associated with the FW by calling Openstack API's and calls t he policy create internal routine.
2.961354
2.946225
1.005135
LOG.debug("FW Rule create %s", data) self._fw_rule_create(fw_name, data, cache)
def fw_rule_create(self, data, fw_name=None, cache=False)
Top level rule creation routine.
4.257284
4.490091
0.948151
rule_id = data.get('firewall_rule_id') tenant_id = self.tenant_db.get_rule_tenant(rule_id) if tenant_id not in self.fwid_attr: LOG.error("Invalid tenant id for FW delete %s", tenant_id) return tenant_obj = self.fwid_attr[tenant_id] # Guess actual FW/policy need not be deleted if this is the active # rule, Openstack does not allow it to be deleted tenant_obj.delete_rule(rule_id) self.tenant_db.del_rule_tenant(rule_id)
def _fw_rule_delete(self, drvr_name, data)
Function that updates its local cache after a rule is deleted.
4.825978
4.852943
0.994444
LOG.debug("FW Rule delete %s", data) self._fw_rule_delete(fw_name, data)
def fw_rule_delete(self, data, fw_name=None)
Top level rule delete function.
4.492867
4.614396
0.973663
LOG.debug("FW Update %s", data) tenant_id = data.get('firewall_rule').get('tenant_id') fw_rule = data.get('firewall_rule') rule = self._fw_rule_decode_store(data) rule_id = fw_rule.get('id') if tenant_id not in self.fwid_attr or not ( self.fwid_attr[tenant_id].is_rule_present(rule_id)): LOG.error("Incorrect update info for tenant %s", tenant_id) return self.fwid_attr[tenant_id].rule_update(rule_id, rule) self._check_update_fw(tenant_id, drvr_name)
def _fw_rule_update(self, drvr_name, data)
Firewall Rule update routine. Function to decode the updated rules and call routines that in turn calls the device routines to update rules.
3.429719
3.361847
1.020189
LOG.debug("FW Update Debug") self._fw_rule_update(fw_name, data)
def fw_rule_update(self, data, fw_name=None)
Top level rule update routine.
8.819412
8.716752
1.011777
policy_id = data.get('firewall_policy_id') tenant_id = self.tenant_db.get_policy_tenant(policy_id) if tenant_id not in self.fwid_attr: LOG.error("Invalid tenant id for FW delete %s", tenant_id) return tenant_obj = self.fwid_attr[tenant_id] # Guess actual FW need not be deleted since if this is the active # policy, Openstack does not allow it to be deleted tenant_obj.delete_policy(policy_id) self.tenant_db.del_policy_tenant(policy_id)
def _fw_policy_delete(self, drvr_name, data)
Routine to delete the policy from local cache.
4.660741
4.628488
1.006968
LOG.debug("FW Policy Debug") self._fw_policy_delete(fw_name, data)
def fw_policy_delete(self, data, fw_name=None)
Top level policy delete routine.
8.416628
7.772999
1.082803
policy = {} fw_policy = data.get('firewall_policy') tenant_id = fw_policy.get('tenant_id') LOG.info("Creating policy for tenant %s", tenant_id) policy_id = fw_policy.get('id') policy_name = fw_policy.get('name') pol_rule_dict = fw_policy.get('firewall_rules') if tenant_id not in self.fwid_attr: self.fwid_attr[tenant_id] = FwMapAttr(tenant_id) policy['name'] = policy_name policy['rule_dict'] = pol_rule_dict self.fwid_attr[tenant_id].store_policy(policy_id, policy) if not cache: self._check_create_fw(tenant_id, drvr_name) self.tenant_db.store_policy_tenant(policy_id, tenant_id) for rule in pol_rule_dict: rule_id = rule if not self.fwid_attr[tenant_id].is_rule_present(rule_id): rule_data = self.os_helper.get_fw_rule(rule_id) if rule_data is not None: self.fw_rule_create(rule_data, cache=cache)
def _fw_policy_create(self, drvr_name, data, cache)
Firewall Policy create routine. This function updates its local cache with policy parameters. It checks if local cache has information about the rules associated with the policy. If not, it means a restart has happened. It retrieves the rules associated with the policy by calling Openstack API's and calls the rule create internal routine.
2.578811
2.60658
0.989347
LOG.debug("FW Policy Debug") self._fw_policy_create(fw_name, data, cache)
def fw_policy_create(self, data, fw_name=None, cache=False)
Top level policy create routine.
7.180631
6.661674
1.077902
rule.update({'tenant_id': tenant_id, 'id': rule_id, 'firewall_policy_id': policy_id}) fw_rule_data = {'firewall_rule': rule} return fw_rule_data
def convert_fwdb_event_msg(self, rule, tenant_id, rule_id, policy_id)
Convert the Firewall DB to a event message format. From inputs from DB, this will create a FW rule dictionary that resembles the actual data from Openstack when a rule is created. This is usually called after restart, in order to populate local cache.
2.73528
2.609388
1.048246
fw_dict = {'tenant_id': tenant_id, 'name': name, 'id': fw_id, 'firewall_policy_id': policy_id, 'admin_state_up': True} fw_data = {'firewall': fw_dict} return fw_data
def convert_fwdb(self, tenant_id, name, policy_id, fw_id)
Convert the Firewall DB to a query response. From FWDB inputs, this will create a FW message that resembles the actual data from Openstack, when a query for FW is done.
2.19475
2.349667
0.934069
fw_dict = self.get_all_fw_db() LOG.info("Populating FW Mgr Local Cache") for fw_id in fw_dict: fw_data = fw_dict.get(fw_id) tenant_id = fw_data.get('tenant_id') rule_dict = fw_data.get('rules').get('rules') policy_id = fw_data.get('rules').get('firewall_policy_id') for rule in rule_dict: fw_evt_data = self.convert_fwdb_event_msg(rule_dict.get(rule), tenant_id, rule, policy_id) LOG.info("Populating Rules for tenant %s", tenant_id) self.fw_rule_create(fw_evt_data, cache=True) fw_os_data = self.os_helper.get_fw(fw_id) # If enabler is stopped and FW is deleted, then the above routine # will fail. if fw_os_data is None: fw_os_data = self.convert_fwdb(tenant_id, fw_data.get('name'), policy_id, fw_id) LOG.info("Populating FW for tenant %s", tenant_id) self.fw_create(fw_os_data, cache=True) if fw_data.get('device_status') == 'SUCCESS': self.fwid_attr[tenant_id].fw_drvr_created(True) else: self.fwid_attr[tenant_id].fw_drvr_created(False) return fw_dict
def populate_local_cache(self)
This populates the local cache after reading the Database. It calls the appropriate rule create, fw create routines. It doesn't actually call the routine to prepare the fabric or cfg the device since it will be handled by retry module.
3.618286
3.427705
1.0556
result = fw_data.get('result').split('(')[0] is_fw_virt = self.is_device_virtual() # Fabric portion if result == fw_constants.RESULT_FW_CREATE_INIT: name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id) ret = self.fabric.retry_failure(tenant_id, name, fw_dict, is_fw_virt, result) if not ret: LOG.error("Retry failure returned fail for tenant %s", tenant_id) return else: result = fw_constants.RESULT_FW_CREATE_DONE self.update_fw_db_final_result(fw_dict.get('fw_id'), result) # Device portion if result == fw_constants.RESULT_FW_CREATE_DONE: if fw_data.get('device_status') != 'SUCCESS': ret = self.create_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict) if ret: self.fwid_attr[tenant_id].fw_drvr_created(True) self.update_fw_db_dev_status(fw_dict.get('fw_id'), 'SUCCESS') LOG.info("Retry failue return success for create" " tenant %s", tenant_id)
def retry_failure_fab_dev_create(self, tenant_id, fw_data, fw_dict)
This module calls routine in fabric to retry the failure cases. If device is not successfully cfg/uncfg, it calls the device manager routine to cfg/uncfg the device.
4.414667
4.404857
1.002227
result = fw_data.get('result').split('(')[0] name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id) fw_dict['tenant_name'] = name is_fw_virt = self.is_device_virtual() if result == fw_constants.RESULT_FW_DELETE_INIT: if self.fwid_attr[tenant_id].is_fw_drvr_created(): ret = self.delete_fw_device(tenant_id, fw_dict.get('fw_id'), fw_dict) if ret: # Device portion self.update_fw_db_dev_status(fw_dict.get('fw_id'), '') self.fwid_attr[tenant_id].fw_drvr_created(False) LOG.info("Retry failue dev return success for delete" " tenant %s", tenant_id) else: return name = dfa_dbm.DfaDBMixin.get_project_name(self, tenant_id) ret = self.fabric.retry_failure(tenant_id, name, fw_dict, is_fw_virt, result) if not ret: LOG.error("Retry failure returned fail for tenant %s", tenant_id) return result = fw_constants.RESULT_FW_DELETE_DONE self.update_fw_db_final_result(fw_dict.get('fw_id'), result) self.delete_fw(fw_dict.get('fw_id')) self.fwid_attr[tenant_id].delete_fw(fw_dict.get('fw_id')) self.tenant_db.del_fw_tenant(fw_dict.get('fw_id'))
def retry_failure_fab_dev_delete(self, tenant_id, fw_data, fw_dict)
Retry the failure cases for delete. This module calls routine in fabric to retry the failure cases for delete. If device is not successfully cfg/uncfg, it calls the device manager routine to cfg/uncfg the device.
3.766239
3.738473
1.007427
for tenant_id in self.fwid_attr: try: with self.fwid_attr[tenant_id].mutex_lock: if self.fwid_attr[tenant_id].is_fw_drvr_create_needed(): fw_dict = self.fwid_attr[tenant_id].get_fw_dict() if fw_dict: fw_obj, fw_data = self.get_fw(fw_dict.get('fw_id')) self.retry_failure_fab_dev_create(tenant_id, fw_data, fw_dict) else: LOG.error("FW data not found for tenant %s", tenant_id) except Exception as exc: LOG.error("Exception in retry failure create %s", str(exc))
def fw_retry_failures_create(self)
This module is called for retrying the create cases.
3.891508
3.901417
0.99746
rule_dict = fw_data.get('rules').get('rules') fw_dict = {'fw_id': fw_data.get('fw_id'), 'fw_name': fw_data.get('name'), 'firewall_policy_id': fw_data.get('firewall_policy_id'), 'fw_type': fw_data.get('fw_type'), 'router_id': fw_data.get('router_id'), 'rules': {}} for rule in rule_dict: fw_dict['rules'][rule] = rule_dict.get(rule) return fw_dict
def fill_fw_dict_from_db(self, fw_data)
This routine is called to create a local fw_dict with data from DB.
2.0854
2.0741
1.005448