code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
LOG.info('Starting process_amqp_msgs...') while True: (mtd_fr, hdr_fr, body) = (None, None, None) try: if self.consume_channel: (mtd_fr, hdr_fr, body) = self.consume_channel.basic_get( self._dcnm_queue_name) if mtd_fr: # Queue has messages. LOG.info('RX message: %s', body) self._cb_dcnm_msg(mtd_fr, body) self.consume_channel.basic_ack(mtd_fr.delivery_tag) else: # Queue is empty. try: self._conn.sleep(1) except AttributeError: time.sleep(1) except Exception: exc_type, exc_value, exc_tb = sys.exc_info() tb_str = traceback.format_exception(exc_type, exc_value, exc_tb) LOG.exception("Failed to read from queue: %(queue)s " "%(exc_type)s, %(exc_value)s, %(exc_tb)s.", { 'queue': self._dcnm_queue_name, 'exc_type': exc_type, 'exc_value': exc_value, 'exc_tb': tb_str})
def process_amqp_msgs(self)
Process AMQP queue messages. It connects to AMQP server and calls callbacks to process DCNM events, i.e. routing key containing '.cisco.dcnm.', once they arrive in the queue.
2.740808
2.524428
1.085715
ping_cmd = ['ping', '-c', '5', '-W', '1', '-i', '0.2', ip] try: linux_utils.execute(ping_cmd, check_exit_code=True) return True except RuntimeError: LOG.warning("Cannot ping ip address: %s", ip) return False
def _is_pingable(ip)
Checks whether an IP address is reachable by pinging. Use linux utils to execute the ping (ICMP ECHO) command. Sends 5 packets with an interval of 0.2 seconds and timeout of 1 seconds. Runtime error implies unreachability else IP is pingable. :param ip: IP to check :return: bool - True or False depending on pingability.
2.88662
2.837688
1.017244
cs = socket.socket() try: cs.connect((ip, port)) cs.close() return True except socket.error: return False
def _can_connect(ip, port)
Checks if a TCP port at IP address is possible to connect to
2.577896
2.479514
1.039678
res = [] for hd_id in self.hosting_devices_backlog: hd = self.hosting_devices_backlog[hd_id]['hd'] if hd['hd_state'] == cc.HD_DEAD: res.append(hd['id']) return res
def get_dead_hosting_devices_info(self)
Get a list of hosting devices that have been marked dead :return: List of dead hosting device ids
4.755941
4.195632
1.133546
wait_time = datetime.timedelta( seconds=cfg.CONF.cfg_agent.hosting_device_dead_timeout) resp = [] for hd_id in self.hosting_devices_backlog: hd = self.hosting_devices_backlog[hd_id]['hd'] display_hd = True if hd_state_filter is not None: if hd['hd_state'] == hd_state_filter: display_hd = True else: display_hd = False if display_hd: created_time = hd['created_at'] boottime = datetime.timedelta(seconds=hd['booting_time']) backlogged_at = hd['backlog_insertion_ts'] booted_at = created_time + boottime dead_at = backlogged_at + wait_time resp.append({'host id': hd['id'], 'hd_state': hd['hd_state'], 'created at': str(created_time), 'backlogged at': str(backlogged_at), 'estimate booted at': str(booted_at), 'considered dead at': str(dead_at)}) else: continue return resp
def get_monitored_hosting_devices_info(self, hd_state_filter=None)
This function returns a list of all hosting devices monitored by this agent
3.221173
3.232588
0.996469
ret_val = False hd = hosting_device hd_id = hosting_device['id'] hd_mgmt_ip = hosting_device['management_ip_address'] dead_hd_list = self.get_dead_hosting_devices_info() if hd_id in dead_hd_list: LOG.debug("Hosting device: %(hd_id)s@%(ip)s is already marked as" " Dead. It is assigned as non-reachable", {'hd_id': hd_id, 'ip': hd_mgmt_ip}) return False # Modifying the 'created_at' to a date time object if it is not if not isinstance(hd['created_at'], datetime.datetime): hd['created_at'] = datetime.datetime.strptime(hd['created_at'], '%Y-%m-%d %H:%M:%S') if _is_pingable(hd_mgmt_ip): LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.", {'hd_id': hd_id, 'ip': hd_mgmt_ip}) hd['hd_state'] = cc.HD_ACTIVE ret_val = True else: LOG.debug("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.", {'hd_id': hd_id, 'ip': hd_mgmt_ip}) hd['hd_state'] = cc.HD_NOT_RESPONDING ret_val = False if self.enable_heartbeat is True or ret_val is False: self.backlog_hosting_device(hd) return ret_val
def is_hosting_device_reachable(self, hosting_device)
Check the hosting device which hosts this resource is reachable. If the resource is not reachable, it is added to the backlog. * heartbeat revision We want to enqueue all hosting-devices into the backlog for monitoring purposes adds key/value pairs to hd (aka hosting_device dictionary) _is_pingable : if it returns true, hd['hd_state']='Active' _is_pingable : if it returns false, hd['hd_state']='Unknown' :param hosting_device : dict of the hosting device :returns: True if device is reachable, else None
2.579023
2.386315
1.080755
return super(CiscoRouterPlugin, self).create_floatingip( context, floatingip, initial_status=bc.constants.FLOATINGIP_STATUS_DOWN)
def create_floatingip(self, context, floatingip)
Create floating IP. :param context: Neutron request context :param floatingip: data for the floating IP being created :returns: A floating IP object on success As the l3 router plugin asynchronously creates floating IPs leveraging the l3 agent and l3 cfg agent, the initial status for the floating IP object will be DOWN.
4.814982
6.133826
0.784988
in_sub = self.get_in_subnet_id(tenant_id) out_sub = self.get_out_subnet_id(tenant_id) # Modify Hard coded Name fixme subnet_lst = set() subnet_lst.add(in_sub) subnet_lst.add(out_sub) ret = self.os_helper.add_intf_router(router_id, tenant_id, subnet_lst) return ret, in_sub, out_sub
def attach_intf_router(self, tenant_id, tenant_name, router_id)
Routine to attach the interface to the router.
4.002516
3.927556
1.019086
router_id = None if tenant_id in self.tenant_dict: router_id = self.tenant_dict.get(tenant_id).get('router_id') if not router_id: router_list = self.os_helper.get_rtr_by_name( 'FW_RTR_' + tenant_name) if len(router_list) > 0: router_id = router_list[0].get('id') return router_id
def get_router_id(self, tenant_id, tenant_name)
Retrieve the router ID.
2.478997
2.411476
1.028
in_sub = self.get_in_subnet_id(tenant_id) out_sub = self.get_out_subnet_id(tenant_id) subnet_lst = set() subnet_lst.add(in_sub) subnet_lst.add(out_sub) router_id = self.get_router_id(tenant_id, tenant_name) if router_id: ret = self.os_helper.delete_intf_router(tenant_name, tenant_id, router_id, subnet_lst) if not ret: LOG.error("Failed to delete router intf id %(rtr)s, " "tenant %(tenant)s", {'rtr': router_id, 'tenant': tenant_id}) return ret LOG.error("Invalid router ID, can't delete interface from " "router")
def delete_intf_router(self, tenant_id, tenant_name, router_id)
Routine to delete the router.
2.702158
2.680855
1.007946
max_get_router_info_retry = True attempt = 0 while max_get_router_info_retry: port_data = self.os_helper.get_router_port_subnet(subnet_id) if port_data is None: LOG.error("Unable to get router port data") return None if port_data.get('binding:host_id') == '': time.sleep(3) attempt += 1 if attempt > 3: max_get_router_info_retry = False LOG.error("Unable to get router binding host data, " "Max attempts reached") else: max_get_router_info_retry = False if status is 'up': event_type = 'service.vnic.create' else: event_type = 'service.vnic.delete' vnic_data = {'status': status, 'mac': port_data.get('mac_address'), 'segid': seg, 'host': port_data.get('binding:host_id')} if vnic_data['host'] == '': LOG.error("Null host for seg %(seg)s subnet %(subnet)s", {'seg': seg, 'subnet': subnet_id}) if self.tenant_dict.get(tenant_id).get('host') is None: LOG.error("Null host for tenant %(tenant)s seg %(seg)s " "subnet %(subnet)s", {'tenant': tenant_id, 'seg': seg, 'subnet': subnet_id}) return None else: vnic_data['host'] = self.tenant_dict.get(tenant_id).get('host') else: self.tenant_dict[tenant_id]['host'] = vnic_data['host'] vm_ip = port_data.get('fixed_ips')[0].get('ip_address') vnic_data.update({'port_id': port_data.get('id'), 'network_id': net_id, 'vm_name': 'FW_SRVC_RTR_' + tenant_name, 'vm_ip': vm_ip, 'vm_uuid': router_id, 'gw_mac': None, 'fwd_mod': 'anycast_gateway'}) payload = {'service': vnic_data} data = (event_type, payload) return data
def prepare_router_vm_msg(self, tenant_id, tenant_name, router_id, net_id, subnet_id, seg, status)
Prepare the message to be sent to Event queue for VDP trigger. This is actually called for a subnet add to a router. This function prepares a VM's VNIC create/delete message.
2.521791
2.507385
1.005745
data = self.prepare_router_vm_msg(tenant_id, tenant_name, router_id, net_id, subnet_id, seg, status) if data is None: return False timestamp = time.ctime() pri = Q_PRIORITY LOG.info("Sending native FW data into queue %(data)s", {'data': data}) self.que_obj.put((pri, timestamp, data)) return True
def send_router_port_msg(self, tenant_id, tenant_name, router_id, net_id, subnet_id, seg, status)
Sends the router port message to the queue.
5.840548
5.652194
1.033324
ip_list = self.os_helper.get_subnet_nwk_excl(tenant_id, arg_dict.get('excl_list')) srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id) ret = self.dcnm_obj.update_partition_static_route( arg_dict.get('tenant_name'), fw_const.SERV_PART_NAME, ip_list, vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile, service_node_ip=srvc_node_ip) if not ret: LOG.error("Unable to update DCNM ext profile with static " "route %s", arg_dict.get('router_id')) self.delete_intf_router(tenant_id, arg_dict.get('tenant_name'), arg_dict.get('router_id')) return False return True
def update_dcnm_partition_static_route(self, tenant_id, arg_dict)
Add static route in DCNM's partition. This gets pushed to the relevant leaf switches.
4.393506
4.544832
0.966704
in_seg, in_vlan = self.get_in_seg_vlan(tenant_id) out_seg, out_vlan = self.get_out_seg_vlan(tenant_id) in_ip_dict = self.get_in_ip_addr(tenant_id) out_ip_dict = self.get_out_ip_addr(tenant_id) excl_list = [in_ip_dict.get('subnet'), out_ip_dict.get('subnet')] arg_dict = {'tenant_id': tenant_id, 'tenant_name': data.get('tenant_name'), 'in_seg': in_seg, 'in_vlan': in_vlan, 'out_seg': out_seg, 'out_vlan': out_vlan, 'router_id': data.get('router_id'), 'in_sub': in_sub, 'out_sub': out_sub, 'in_gw': in_ip_dict.get('gateway'), 'out_gw': out_ip_dict.get('gateway'), 'excl_list': excl_list} return arg_dict
def _create_arg_dict(self, tenant_id, data, in_sub, out_sub)
Create the argument dictionary.
1.712142
1.698033
1.008309
LOG.debug("In creating Native FW data is %s", data) # TODO(padkrish): # Check if router is already added and only then add, needed for # restart cases since native doesn't have a special DB ret, in_sub, out_sub = self.attach_intf_router(tenant_id, data.get('tenant_name'), data.get('router_id')) if not ret: LOG.error("Native FW: Attach intf router failed for tenant " "%s", tenant_id) return False self.create_tenant_dict(tenant_id, data.get('router_id')) arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub) # Program DCNM to update profile's static IP address on OUT part ret = self.update_dcnm_partition_static_route(tenant_id, arg_dict) if not ret: return False # Program the default GW in router namespace ret = self.program_default_gw(tenant_id, arg_dict) if not ret: return False # Program router namespace to have all tenant networks to be routed # to IN service network ret = self.program_next_hop(tenant_id, arg_dict) if not ret: return False # Send message for router port auto config for in service nwk ret = self.send_in_router_port_msg(tenant_id, arg_dict, 'up') if not ret: return False # Send message for router port auto config for out service nwk return self.send_out_router_port_msg(tenant_id, arg_dict, 'up')
def _create_fw(self, tenant_id, data)
Internal routine that gets called when a FW is created.
5.303424
5.246812
1.01079
try: return self._create_fw(tenant_id, data) except Exception as exc: LOG.error("Failed to create FW for device native, tenant " "%(tenant)s data %(data)s Exc %(exc)s", {'tenant': tenant_id, 'data': data, 'exc': exc}) return False
def create_fw(self, tenant_id, data)
Top level routine called when a FW is created.
3.339134
3.34263
0.998954
LOG.debug("In Delete fw data is %s", data) in_sub = self.get_in_subnet_id(tenant_id) out_sub = self.get_out_subnet_id(tenant_id) arg_dict = self._create_arg_dict(tenant_id, data, in_sub, out_sub) if arg_dict.get('router_id') is None: LOG.error("Router ID unknown for tenant %s", tenant_id) return False if tenant_id not in self.tenant_dict: self.create_tenant_dict(tenant_id, arg_dict.get('router_id')) ret = self.send_in_router_port_msg(tenant_id, arg_dict, 'down') if not ret: return False ret = self.send_out_router_port_msg(tenant_id, arg_dict, 'down') if not ret: return False # Usually sending message to queue doesn't fail!!! router_ret = self.delete_intf_router(tenant_id, arg_dict.get('tenant_name'), arg_dict.get('router_id')) if not router_ret: LOG.error("Unable to delete router for tenant %s, error case", tenant_id) return router_ret del self.tenant_dict[tenant_id] return router_ret
def _delete_fw(self, tenant_id, data)
Internal routine called when a FW is deleted.
3.147162
3.10885
1.012324
try: ret = self._delete_fw(tenant_id, data) return ret except Exception as exc: LOG.error("Failed to delete FW for device native, tenant " "%(tenant)s data %(data)s Exc %(exc)s", {'tenant': tenant_id, 'data': data, 'exc': exc}) return False
def delete_fw(self, tenant_id, data)
Top level routine called when a FW is deleted.
3.518137
3.409197
1.031955
in_ip_dict = self.get_in_ip_addr(tenant_id) in_gw = in_ip_dict.get('gateway') in_ip = in_ip_dict.get('subnet') if in_gw is None: LOG.error("No FW service GW present") return False out_ip_dict = self.get_out_ip_addr(tenant_id) out_ip = out_ip_dict.get('subnet') # Program DCNM to update profile's static IP address on OUT part excl_list = [] excl_list.append(in_ip) excl_list.append(out_ip) subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list, excl_part=True) # This count is for telling DCNM to insert the static route in a # particular position. Total networks created - exclusive list as # above - the network that just got created. srvc_node_ip = self.get_out_srvc_node_ip_addr(tenant_id) ret = self.dcnm_obj.update_partition_static_route( tenant_name, fw_const.SERV_PART_NAME, subnet_lst, vrf_prof=self.cfg.firewall.fw_service_part_vrf_profile, service_node_ip=srvc_node_ip) if not ret: LOG.error("Unable to update DCNM ext profile with static " "route") return False return True
def _program_dcnm_static_route(self, tenant_id, tenant_name)
Program DCNM Static Route.
5.100576
5.047732
1.010469
router_id = self.get_router_id(tenant_id, tenant_name) if not router_id: LOG.error("Rout ID not present for tenant") return False ret = self._program_dcnm_static_route(tenant_id, tenant_name) if not ret: LOG.error("Program DCNM with static routes failed " "for router %s", router_id) return False # Program router namespace to have this network to be routed # to IN service network in_ip_dict = self.get_in_ip_addr(tenant_id) in_gw = in_ip_dict.get('gateway') if in_gw is None: LOG.error("No FW service GW present") return False ret = self.os_helper.program_rtr_nwk_next_hop(router_id, in_gw, cidr) if not ret: LOG.error("Unable to program default router next hop %s", router_id) return False return True
def network_create_notif(self, tenant_id, tenant_name, cidr)
Tenant Network create Notification. Restart is not supported currently for this. fixme(padkrish).
4.373846
4.346532
1.006284
router_id = self.get_router_id(tenant_id, tenant_name) if router_id is None: LOG.error("Rout ID not present for tenant") return False ret = self._program_dcnm_static_route(tenant_id, tenant_name) if not ret: LOG.error("Program DCNM with static routes failed for " "router %s", router_id) return False # Program router namespace to have this network to be routed # to IN service network in_ip_dict = self.get_in_ip_addr(tenant_id) in_gw = in_ip_dict.get('gateway') in_ip = in_ip_dict.get('subnet') if in_gw is None: LOG.error("No FW service GW present") return False out_ip_dict = self.get_out_ip_addr(tenant_id) out_ip = out_ip_dict.get('subnet') excl_list = [] excl_list.append(in_ip) excl_list.append(out_ip) subnet_lst = self.os_helper.get_subnet_nwk_excl(tenant_id, excl_list, excl_part=True) ret = self.os_helper.remove_rtr_nwk_next_hop(router_id, in_gw, subnet_lst, excl_list) if not ret: LOG.error("Unable to program default router next hop %s", router_id) return False return True
def network_delete_notif(self, tenant_id, tenant_name, network_id)
Tenant Network delete Notification. Restart is not supported currently for this. fixme(padkrish).
3.480114
3.455637
1.007083
mgmt_port = None if mgmt_context and mgmt_context.get('mgmt_nw_id') and tenant_id: # Create port for mgmt interface p_spec = {'port': { 'tenant_id': tenant_id, 'admin_state_up': True, 'name': 'mgmt', 'network_id': mgmt_context['mgmt_nw_id'], 'mac_address': bc.constants.ATTR_NOT_SPECIFIED, 'fixed_ips': self._mgmt_subnet_spec(context, mgmt_context), 'device_id': "", # Use device_owner attribute to ensure we can query for these # ports even before Nova has set device_id attribute. 'device_owner': complementary_id}} try: mgmt_port = self._core_plugin.create_port(context, p_spec) except n_exc.NeutronException as e: LOG.error('Error %s when creating management port. ' 'Cleaning up.', e) self.delete_hosting_device_resources( context, tenant_id, mgmt_port) mgmt_port = None # We are setting the 'ports' to an empty list as it is expected by # the callee: device_handling_db._create_svc_vm_hosting_devices() return {'mgmt_port': mgmt_port, 'ports': []}
def create_hosting_device_resources(self, context, complementary_id, tenant_id, mgmt_context, max_hosted)
Create resources for a hosting device in a plugin specific way.
3.821724
3.816879
1.001269
mgmt_port = None # Ports for hosting device may not yet have 'device_id' set to # Nova assigned uuid of VM instance. However, those ports will still # have 'device_owner' attribute set to complementary_id. Hence, we # use both attributes in the query to ensure we find all ports. query = context.session.query(models_v2.Port) query = query.filter(expr.or_( models_v2.Port.device_id == id, models_v2.Port.device_owner == complementary_id)) for port in query: if port['network_id'] != mgmt_nw_id: raise Exception else: mgmt_port = port return {'mgmt_port': mgmt_port}
def get_hosting_device_resources(self, context, id, complementary_id, tenant_id, mgmt_nw_id)
Returns information about all resources for a hosting device.
4.376646
4.486369
0.975543
if mgmt_port is not None: try: self._cleanup_hosting_port(context, mgmt_port['id']) except n_exc.NeutronException as e: LOG.error("Unable to delete port:%(port)s after %(tries)d" " attempts due to exception %(exception)s. " "Skipping it", {'port': mgmt_port['id'], 'tries': DELETION_ATTEMPTS, 'exception': str(e)})
def delete_hosting_device_resources(self, context, tenant_id, mgmt_port, **kwargs)
Deletes resources for a hosting device in a plugin specific way.
3.272375
3.325789
0.983939
hosting_port = port_db.hosting_info.hosting_port if hosting_port: try: self._dev_mgr.svc_vm_mgr.interface_attach(hosting_device_id, hosting_port.id) LOG.debug("Setup logical port completed for port:%s", port_db.id) except nova_exc.Conflict as e: # VM is still in vm_state building LOG.debug("Failed to attach interface - spawn thread " "error %(error)s", {'error': str(e)}) self._gt_pool.spawn_n(self._attach_hosting_port, hosting_device_id, hosting_port.id) except Exception as e: LOG.error("Failed to attach interface mapped to port:" "%(p_id)s on hosting device:%(hd_id)s due to " "error %(error)s", {'p_id': hosting_port.id, 'hd_id': hosting_device_id, 'error': str(e)})
def setup_logical_port_connectivity(self, context, port_db, hosting_device_id)
Establishes connectivity for a logical port. This is done by hot plugging the interface(VIF) corresponding to the port from the VM.
3.691551
3.757545
0.982437
if port_db is None or port_db.get('id') is None: LOG.warning("Port id is None! Cannot remove port " "from hosting_device:%s", hosting_device_id) return hosting_port_id = port_db.hosting_info.hosting_port.id try: self._dev_mgr.svc_vm_mgr.interface_detach(hosting_device_id, hosting_port_id) self._gt_pool.spawn_n(self._cleanup_hosting_port, context, hosting_port_id) LOG.debug("Teardown logicalport completed for port:%s", port_db.id) except Exception as e: LOG.error("Failed to detach interface corresponding to port:" "%(p_id)s on hosting device:%(hd_id)s due to " "error %(error)s", {'p_id': hosting_port_id, 'hd_id': hosting_device_id, 'error': str(e)})
def teardown_logical_port_connectivity(self, context, port_db, hosting_device_id)
Removes connectivity for a logical port. Unplugs the corresponding data interface from the VM.
3.572495
3.491239
1.023274
l3admin_tenant_id = self._dev_mgr.l3_tenant_id() hostingport_name = 'hostingport_' + port_db['id'][:8] p_spec = {'port': { 'tenant_id': l3admin_tenant_id, 'admin_state_up': True, 'name': hostingport_name, 'network_id': port_db['network_id'], 'mac_address': bc.constants.ATTR_NOT_SPECIFIED, 'fixed_ips': [], 'device_id': '', 'device_owner': '', 'port_security_enabled': False}} try: hosting_port = self._core_plugin.create_port(context, p_spec) except n_exc.NeutronException as e: LOG.error('Error %s when creating hosting port' 'Cleaning up.', e) self.delete_hosting_device_resources( context, l3admin_tenant_id, hosting_port) hosting_port = None finally: if hosting_port: return {'allocated_port_id': hosting_port['id'], 'allocated_vlan': None} else: return None
def allocate_hosting_port(self, context, router_id, port_db, network_type, hosting_device_id)
Allocates a hosting port for a logical port. We create a hosting port for the router port
3.041393
3.044636
0.998935
self.update_server(disabled=True) if retain_port: return self.update_device(disabled=True) if self.conf.dhcp_delete_namespaces and self.network.namespace: ns_ip = ip_lib.IPWrapper(self.root_helper, self.network.namespace) try: ns_ip.netns.delete(self.network.namespace) except RuntimeError: msg = _('Failed trying to delete namespace: %s') LOG.exception(msg, self.network.namespace)
def disable(self, retain_port=False)
Teardown DHCP. Disable DHCP for this network by updating the remote server and then destroying any local device and namespace.
3.586576
3.062575
1.171098
if "_devices" in globals(): return global _devices confs_dir = os.path.abspath(os.path.normpath(cfg.CONF.dhcp_confs)) for netid in os.listdir(confs_dir): conf_dir = os.path.join(confs_dir, netid) intf_filename = os.path.join(conf_dir, 'interface') try: with open(intf_filename, 'r') as f: ifname = f.read() _devices[netid] = ifname except IOError: LOG.error('Unable to read interface file: %s', intf_filename) LOG.debug("Recovered device %s for network %s'", ifname, netid)
def recover_devices(cls)
Track devices. Creates global dict to track device names across driver invocations and populates based on current devices configured on the system.
3.507638
3.252175
1.078552
super(SimpleCpnrDriver, cls).check_version() model.configure_pnr() cls.recover_networks() ver = model.get_version() if ver < cls.MIN_VERSION: LOG.warning("CPNR version does not meet minimum requirements, " "expected: %(ever)f, actual: %(rver)f", {'ever': cls.MIN_VERSION, 'rver': ver}) return ver
def check_version(cls)
Checks server version against minimum required version.
7.663496
7.269874
1.054144
global _networks sup = super(SimpleCpnrDriver, cls) superkeys = sup.existing_dhcp_networks(conf) return set(_networks.keys()) & set(superkeys)
def existing_dhcp_networks(cls, conf)
Return a list of existing networks ids that we have configs for.
10.701483
9.442668
1.133311
id = self.network.id net = model.Network.from_neutron(self.network) if id not in _networks: if disabled: return _networks[id] = net _networks[id].create() elif disabled: _networks[id].delete() del _networks[id] else: _networks[id].update(net) _networks[id] = net
def _unsafe_update_server(self, disabled=False)
Update server with latest network configuration.
3.468235
3.316906
1.045624
try: body = {'network': {'name': name, 'tenant_id': tenant_id, 'admin_state_up': True}} netw = self.neutronclient.create_network(body=body) net_dict = netw.get('network') net_id = net_dict.get('id') except Exception as exc: LOG.error("Failed to create network %(name)s, Exc %(exc)s", {'name': name, 'exc': str(exc)}) return None, None try: if gw is None: body = {'subnet': {'cidr': subnet, 'ip_version': 4, 'network_id': net_id, 'tenant_id': tenant_id, 'enable_dhcp': False}} else: body = {'subnet': {'cidr': subnet, 'ip_version': 4, 'network_id': net_id, 'tenant_id': tenant_id, 'enable_dhcp': False, 'gateway_ip': gw}} subnet_ret = self.neutronclient.create_subnet(body=body) subnet_dict = subnet_ret.get('subnet') subnet_id = subnet_dict.get('id') except Exception as exc: LOG.error("Failed to create subnet %(sub)s, exc %(exc)s", {'sub': subnet, 'exc': str(exc)}) try: self.neutronclient.delete_network(net_id) except Exception as exc: LOG.error("Failed to delete network %(net)s, exc %(exc)s", {'net': net_id, 'exc': str(exc)}) return None, None return net_id, subnet_id
def create_network(self, name, tenant_id, subnet, gw=None)
Create the openstack network, including the subnet.
1.550002
1.547555
1.001581
try: self.neutronclient.delete_subnet(subnet_id) except Exception as exc: LOG.error("Failed to delete subnet %(sub)s exc %(exc)s", {'sub': subnet_id, 'exc': str(exc)}) return try: self.neutronclient.delete_network(net_id) except Exception as exc: LOG.error("Failed to delete network %(name)s exc %(exc)s", {'name': name, 'exc': str(exc)})
def delete_network(self, name, tenant_id, subnet_id, net_id)
Delete the openstack subnet and network.
1.793198
1.819803
0.98538
try: body = {'network_id': net_id} subnet_list = self.neutronclient.list_subnets(body=body) subnet_list = subnet_list.get('subnets') for subnet in subnet_list: if subnet.get('network_id') == net_id: subnet_id = subnet.get('id') self.neutronclient.delete_subnet(subnet_id) except Exception as exc: LOG.error("Failed to delete subnet for net %(net)s " "Exc %(exc)s", {'net': net_id, 'exc': str(exc)}) return False try: self.neutronclient.delete_network(net_id) except Exception as exc: LOG.error("Failed to delete network %(net)s Exc %(exc)s", {'net': net_id, 'exc': str(exc)}) return False return True
def delete_network_all_subnets(self, net_id)
Delete the openstack network including all its subnets.
1.71004
1.752951
0.975521
try: subnet_list = self.neutronclient.list_subnets(body={}) subnet_dat = subnet_list.get('subnets') for sub in subnet_dat: if sub.get('cidr') == subnet_addr: return True return False except Exception as exc: LOG.error("Failed to list subnet %(sub)s, Exc %(exc)s", {'sub': subnet_addr, 'exc': str(exc)}) return False
def is_subnet_present(self, subnet_addr)
Returns if a subnet is present.
2.792872
2.680208
1.042035
body = {} subnet_cidrs = [] try: subnet_list = self.neutronclient.list_subnets(body=body) subnet_dat = subnet_list.get('subnets') for sub in subnet_dat: if no_mask: subnet_cidrs.append(sub.get('cidr').split('/')[0]) else: subnet_cidrs.append(sub.get('cidr')) except Exception as exc: LOG.error("Failed to list subnet Exc %s", str(exc)) return subnet_cidrs
def get_all_subnets_cidr(self, no_mask=False)
Returns all the subnets.
2.590806
2.528092
1.024807
try: subnet_list = self.neutronclient.list_subnets(network_id=net) subnet_dat = subnet_list.get('subnets') return subnet_dat except Exception as exc: LOG.error("Failed to list subnet net %(net)s, Exc: %(exc)s", {'net': net, 'exc': str(exc)}) return None
def get_subnets_for_net(self, net)
Returns the subnets in a network.
3.108608
2.965948
1.048099
try: subnet_list = self.neutronclient.list_subnets(id=subnet_id) subnet_dat = subnet_list.get('subnets')[0] return subnet_dat.get('cidr') except Exception as exc: LOG.error("Failed to list subnet for ID %(subnet)s, " "exc %(exc)s", {'subnet': subnet_id, 'exc': exc}) return None
def get_subnet_cidr(self, subnet_id)
retrieve the CIDR associated with a subnet, given its ID.
2.581094
2.618152
0.985846
try: body = {} net_list = self.neutronclient.list_networks(body=body) for net in net_list: if net.get('name').find(sub_name) != -1: self.delete_network_all_subnets(net.get('net_id')) except Exception as exc: LOG.error("Failed to get network by subname %(name)s, " "Exc %(exc)s", {'name': sub_name, 'exc': str(exc)})
def delete_network_subname(self, sub_name)
Delete the network by part of its name, use with caution.
3.305296
3.303701
1.000483
ret_net_lst = [] try: body = {} net_list = self.neutronclient.list_networks(body=body) net_list = net_list.get('networks') for net in net_list: if net.get('name') == nwk_name: ret_net_lst.append(net) except Exception as exc: LOG.error("Failed to get network by name %(name)s, " "Exc %(exc)s", {'name': nwk_name, 'exc': str(exc)}) return ret_net_lst
def get_network_by_name(self, nwk_name)
Search for a openstack network by name.
2.289066
2.187532
1.046415
ret_net_lst = [] try: net_list = self.neutronclient.list_networks(body={}) for net in net_list.get('networks'): if net.get('tenant_id') == tenant_id: ret_net_lst.append(net) except Exception as exc: LOG.error("Failed to get network by tenant %(tenant)s, " "Exc %(exc)s", {'tenant': tenant_id, 'exc': str(exc)}) return ret_net_lst
def get_network_by_tenant(self, tenant_id)
Returns the network of a given tenant.
2.379848
2.407917
0.988343
upd_rtr_list = [] try: rtr_list = self.neutronclient.list_routers() for rtr in rtr_list.get('routers'): if rtr_name == rtr['name']: upd_rtr_list.append(rtr) except Exception as exc: LOG.error("Failed to get router by name %(name)s, " "Exc %(exc)s", {'name': rtr_name, 'exc': str(exc)}) return upd_rtr_list
def get_rtr_by_name(self, rtr_name)
Search a router by its name.
2.186069
2.156796
1.013573
try: body = {'router': {'name': name, 'tenant_id': tenant_id, 'admin_state_up': True}} router = self.neutronclient.create_router(body=body) rout_dict = router.get('router') rout_id = rout_dict.get('id') except Exception as exc: LOG.error("Failed to create router with name %(name)s" " Exc %(exc)s", {'name': name, 'exc': str(exc)}) return None ret = self.add_intf_router(rout_id, tenant_id, subnet_lst) if not ret: try: ret = self.neutronclient.delete_router(rout_id) except Exception as exc: LOG.error("Failed to delete router %(name)s, Exc %(exc)s", {'name': name, 'exc': str(exc)}) return None return rout_id
def create_router(self, name, tenant_id, subnet_lst)
Create a openstack router and add the interfaces.
2.05705
2.029892
1.013379
try: for subnet_id in subnet_lst: body = {'subnet_id': subnet_id} intf = self.neutronclient.add_interface_router(rout_id, body=body) intf.get('port_id') except Exception as exc: LOG.error("Failed to create router intf ID %(id)s," " Exc %(exc)s", {'id': rout_id, 'exc': str(exc)}) return False return True
def add_intf_router(self, rout_id, tenant_id, subnet_lst)
Add the interfaces to a router.
3.077171
3.038785
1.012632
ret = self.delete_intf_router(name, tenant_id, rout_id, subnet_lst) if not ret: return False try: ret = self.neutronclient.delete_router(rout_id) except Exception as exc: LOG.error("Failed to delete router %(name)s ret %(ret)s " "Exc %(exc)s", {'name': name, 'ret': str(ret), 'exc': str(exc)}) return False return True
def delete_router(self, name, tenant_id, rout_id, subnet_lst)
Delete the openstack router. Delete the router and remove the interfaces attached to it.
2.580622
2.58673
0.997639
try: for subnet_id in subnet_lst: body = {'subnet_id': subnet_id} intf = self.neutronclient.remove_interface_router(rout_id, body=body) intf.get('id') except Exception as exc: LOG.error("Failed to delete router interface %(name)s, " " Exc %(exc)s", {'name': name, 'exc': str(exc)}) return False return True
def delete_intf_router(self, name, tenant_id, rout_id, subnet_lst)
Delete the openstack router and remove the interfaces attached.
3.030437
3.069774
0.987185
try: routers = self.neutronclient.list_routers() rtr_list = routers.get('routers') for rtr in rtr_list: if rtr_name == rtr['name']: self.neutronclient.delete_router(rtr['id']) except Exception as exc: LOG.error("Failed to get and delete router by name %(name)s, " "Exc %(exc)s", {'name': rtr_name, 'exc': str(exc)}) return False return True
def delete_router_by_name(self, rtr_name, tenant_id)
Delete the openstack router and its interfaces given its name. The interfaces should be already removed prior to calling this function.
2.128478
2.218795
0.959295
def get_router_intf(self, router_id):
    """Retrieve the router interfaces.

    Incomplete, TODO(padkrish).
    """
    try:
        body = {}
        # NOTE(review): show_router does not return interface details;
        # this helper is an acknowledged stub (see TODO above) and its
        # result is discarded.
        self.neutronclient.show_router(router_id, body=body)
    except Exception as exc:
        LOG.error("Failed to show router interface %(id)s "
                  "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
    return
def get_router_intf(self, router_id)
Retrieve the router interfaces. Incomplete, TODO(padkrish).
3.218257
3.176955
1.013
def get_rtr_name(self, router_id):
    """Retrieve the router name. Incomplete.

    Returns the router name, or None when the show call fails.
    """
    try:
        reply = self.neutronclient.show_router(router_id, body={})
    except Exception as exc:
        LOG.error("Failed to show router interface %(id)s "
                  "Exc %(exc)s", {'id': router_id, 'exc': str(exc)})
    else:
        return reply.get('router').get('name')
def get_rtr_name(self, router_id)
Retrieve the router name. Incomplete.
3.252012
3.178951
1.022983
def find_rtr_namespace(self, rout_id):
    """Find the namespace associated with the router.

    Scans `ip netns list` output for an entry containing both 'router'
    and the router id; returns it, or None when not found.
    """
    if rout_id is None:
        return None
    try:
        ns_output = utils.execute(['ip', 'netns', 'list'],
                                  root_helper=self.root_helper)
    except Exception as exc:
        LOG.error("Unable to find the namespace list Exception %s", exc)
        return None
    for candidate in ns_output.split():
        if 'router' in candidate and rout_id in candidate:
            return candidate
def find_rtr_namespace(self, rout_id)
Find the namespace associated with the router.
3.132939
3.004486
1.042754
def program_rtr(self, args, rout_id, namespace=None):
    """Execute the command against the router namespace.

    Resolves the namespace from rout_id when not supplied. Returns True
    on success, False on lookup or execution failure.
    """
    if namespace is None:
        namespace = self.find_rtr_namespace(rout_id)
        if namespace is None:
            LOG.error("Unable to find namespace for router %s", rout_id)
            return False
    cmd = ['ip', 'netns', 'exec', namespace] + args
    try:
        utils.execute(cmd, root_helper=self.root_helper)
    except Exception as e:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': cmd, 'exception': e})
        return False
    return True
def program_rtr(self, args, rout_id, namespace=None)
Execute the command against the namespace.
2.255589
2.193885
1.028125
def program_rtr_default_gw(self, tenant_id, rout_id, gw):
    """Program the default gateway of a router."""
    if not self.program_rtr(['route', 'add', 'default', 'gw', gw],
                            rout_id):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
def program_rtr_default_gw(self, tenant_id, rout_id, gw)
Program the default gateway of a router.
4.432785
4.353201
1.018282
def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False):
    """Retrieve the tenant's subnets after applying the exclusion list.

    Subnets whose network address appears in excl_list are dropped.
    When excl_part is True, networks whose name carries a '::' partition
    suffix are skipped entirely.
    """
    result = []
    for net in self.get_network_by_tenant(tenant_id):
        if excl_part and net.get('name').partition('::')[2]:
            continue
        for sub in self.get_subnets_for_net(net.get('id')):
            cidr = sub.get('cidr')
            if cidr.split('/')[0] not in excl_list:
                result.append(cidr)
    return result
def get_subnet_nwk_excl(self, tenant_id, excl_list, excl_part=False)
Retrieve the subnets of a network. Get the subnets inside a network after applying the exclusion list.
2.463663
2.516846
0.978869
def program_rtr_all_nwk_next_hop(self, tenant_id, rout_id, next_hop,
                                 excl_list):
    """Program the next hop for all networks of a tenant.

    Adds a route via next_hop for every tenant subnet whose network
    address is not in excl_list. Returns False on the first failure.
    """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    for net in self.get_network_by_tenant(tenant_id):
        for sub in self.get_subnets_for_net(net.get('id')):
            cidr = sub.get('cidr')
            if cidr.split('/')[0] in excl_list:
                continue
            cmd = ['route', 'add', '-net', cidr, 'gw', next_hop]
            if not self.program_rtr(cmd, rout_id, namespace=namespace):
                LOG.error("Program router returned error for %s", rout_id)
                return False
    return True
def program_rtr_all_nwk_next_hop(self, tenant_id, rout_id, next_hop, excl_list)
Program the next hop for all networks of a tenant.
2.803023
2.741034
1.022615
def program_rtr_nwk_next_hop(self, rout_id, next_hop, cidr):
    """Program the next hop for a single network CIDR of a router."""
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    cmd = ['route', 'add', '-net', cidr, 'gw', next_hop]
    if not self.program_rtr(cmd, rout_id, namespace=namespace):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
def program_rtr_nwk_next_hop(self, rout_id, next_hop, cidr)
Program the next hop for all networks of a tenant.
3.128213
3.227878
0.969124
def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst,
                            excl_list):
    """Remove the next hop for all networks of a tenant.

    Reads the router namespace routing table ('ip route') and deletes
    every non-default route whose destination appears in neither
    subnet_lst nor excl_list.
    """
    namespace = self.find_rtr_namespace(rout_id)
    if namespace is None:
        LOG.error("Unable to find namespace for router %s", rout_id)
        return False
    args = ['ip', 'route']
    ret = self.program_rtr_return(args, rout_id, namespace=namespace)
    if ret is None:
        LOG.error("Get routes return None %s", rout_id)
        return False
    routes = ret.split('\n')
    # Routes in either list are kept; everything else is removed.
    concat_lst = subnet_lst + excl_list
    for rout in routes:
        if len(rout) == 0:
            continue
        # First token of an 'ip route' line is the destination network.
        nwk = rout.split()[0]
        if nwk == 'default':
            continue
        nwk_no_mask = nwk.split('/')[0]
        # Compare both with and without the mask suffix, since the keep
        # lists may contain either form.
        if nwk_no_mask not in concat_lst and nwk not in concat_lst:
            args = ['route', 'del', '-net', nwk, 'gw', next_hop]
            ret = self.program_rtr(args, rout_id, namespace=namespace)
            if not ret:
                LOG.error("Program router returned error for %s", rout_id)
                return False
    return True
def remove_rtr_nwk_next_hop(self, rout_id, next_hop, subnet_lst, excl_list)
Remove the next hop for all networks of a tenant.
2.903955
2.853769
1.017586
def get_fw(self, fw_id):
    """Return the Firewall given its ID, or None on failure."""
    try:
        return self.neutronclient.show_firewall(fw_id)
    except Exception as exc:
        LOG.error("Failed to get firewall list for id %(id)s, "
                  "Exc %(exc)s", {'id': fw_id, 'exc': str(exc)})
    return None
def get_fw(self, fw_id)
Return the Firewall given its ID.
2.997152
2.839656
1.055463
def get_fw_rule(self, rule_id):
    """Return the firewall rule, given its ID, or None on failure."""
    try:
        return self.neutronclient.show_firewall_rule(rule_id)
    except Exception as exc:
        LOG.error("Failed to get firewall rule for id %(id)s "
                  "Exc %(exc)s", {'id': rule_id, 'exc': str(exc)})
    return None
def get_fw_rule(self, rule_id)
Return the firewall rule, given its ID.
2.594782
2.446025
1.060816
def get_fw_policy(self, policy_id):
    """Return the firewall policy, given its ID, or None on failure."""
    try:
        return self.neutronclient.show_firewall_policy(policy_id)
    except Exception as exc:
        LOG.error("Failed to get firewall plcy for id %(id)s "
                  "Exc %(exc)s", {'id': policy_id, 'exc': str(exc)})
    return None
def get_fw_policy(self, policy_id)
Return the firewall policy, given its ID.
2.990584
2.814512
1.062558
def _ensure_create_ha_compliant(self, router, router_type):
    """To be called in create_router() BEFORE router is created in DB.

    Normalizes the router's HA attributes: applies config defaults for
    any unspecified setting and raises when HA is requested while
    disabled, or a disabled HA mechanism is selected.
    """
    details = router.pop(ha.DETAILS, {})
    if details == ATTR_NOT_SPECIFIED:
        details = {}
    res = {ha.ENABLED: router.pop(ha.ENABLED, ATTR_NOT_SPECIFIED),
           ha.DETAILS: details}
    if not is_attr_set(res[ha.ENABLED]):
        res[ha.ENABLED] = router_type['ha_enabled_by_default']
    if res[ha.ENABLED] and not cfg.CONF.ha.ha_support_enabled:
        raise ha.HADisabled()
    if not res[ha.ENABLED]:
        # HA disabled: no details need to be filled in.
        return res
    # HA is enabled: fill in any unspecified detail from config defaults.
    if not is_attr_set(details.get(ha.TYPE, ATTR_NOT_SPECIFIED)):
        details[ha.TYPE] = cfg.CONF.ha.default_ha_mechanism
    if details[ha.TYPE] in cfg.CONF.ha.disabled_ha_mechanisms:
        raise ha.HADisabledHAType(ha_type=details[ha.TYPE])
    if not is_attr_set(details.get(ha.REDUNDANCY_LEVEL,
                                   ATTR_NOT_SPECIFIED)):
        details[ha.REDUNDANCY_LEVEL] = (
            cfg.CONF.ha.default_ha_redundancy_level)
    if not is_attr_set(details.get(ha.PROBE_CONNECTIVITY,
                                   ATTR_NOT_SPECIFIED)):
        details[ha.PROBE_CONNECTIVITY] = (
            cfg.CONF.ha.connectivity_probing_enabled_by_default)
    if not is_attr_set(details.get(ha.PROBE_TARGET, ATTR_NOT_SPECIFIED)):
        details[ha.PROBE_TARGET] = cfg.CONF.ha.default_probe_target
    if not is_attr_set(details.get(ha.PROBE_INTERVAL, ATTR_NOT_SPECIFIED)):
        details[ha.PROBE_INTERVAL] = cfg.CONF.ha.default_ping_interval
    return res
def _ensure_create_ha_compliant(self, router, router_type)
To be called in create_router() BEFORE router is created in DB.
2.162066
2.133575
1.013353
def _create_redundancy_routers(self, context, new_router, ha_settings,
                               new_router_db, ports=None, expire_db=False):
    """To be called in create_router() AFTER router has been created in DB.

    Persists the router's HA settings, creates the HA group for the
    gateway (VIP) port when a gateway exists, and spawns the redundancy
    routers.
    """
    if (ha.ENABLED not in ha_settings or
            not ha_settings[ha.ENABLED]):
        new_router[ha.HA] = {ha.ENABLED: False}
        return
    ha_spec = ha_settings[ha.DETAILS]
    priority = ha_spec.get(ha.PRIORITY, DEFAULT_MASTER_PRIORITY)
    with context.session.begin(subtransactions=True):
        r_ha_s_db = RouterHASetting(
            router_id=new_router['id'],
            ha_type=ha_spec[ha.TYPE],
            redundancy_level=ha_spec[ha.REDUNDANCY_LEVEL],
            priority=priority,
            probe_connectivity=ha_spec[ha.PROBE_CONNECTIVITY],
            probe_target=ha_spec[ha.PROBE_TARGET],
            probe_interval=ha_spec[ha.PROBE_INTERVAL])
        context.session.add(r_ha_s_db)
    if r_ha_s_db.probe_connectivity and r_ha_s_db.probe_target is None:
        LOG.warning("Connectivity probing for high-availability is "
                    "enabled but probe target is not specified. Please"
                    " configure option \'default_probe_target\'.")
    e_context = context.elevated()
    if new_router_db.gw_port:
        # generate ha settings and extra port for router gateway (VIP) port
        gw_port = self._core_plugin._make_port_dict(new_router_db.gw_port)
        self._create_ha_group(e_context, new_router, gw_port, r_ha_s_db)
    self._add_redundancy_routers(e_context, 1,
                                 ha_spec[ha.REDUNDANCY_LEVEL] + 1,
                                 new_router, ports or [], r_ha_s_db)
    if expire_db:
        context.session.expire(new_router_db)
    self._extend_router_dict_ha(new_router, new_router_db)
def _create_redundancy_routers(self, context, new_router, ha_settings, new_router_db, ports=None, expire_db=False)
To be called in create_router() AFTER router has been created in DB.
2.942595
2.93345
1.003117
def _ensure_update_ha_compliant(self, router, current_router,
                                r_hd_binding_db):
    """To be called in update_router() BEFORE router has been updated
    in DB.

    Validates requested HA attribute changes against the current router
    state; raises when they conflict with configuration.
    """
    if r_hd_binding_db.role == ROUTER_ROLE_HA_REDUNDANCY:
        # Redundancy routers themselves never have HA enabled.
        return {ha.ENABLED: False}
    auto_enable_ha = r_hd_binding_db.router_type.ha_enabled_by_default
    requested_ha_details = router.pop(ha.DETAILS, {})
    # If ha_details are given then ha is assumed to be enabled even if
    # it is not explicitly specified or if auto_enable_ha says so.
    # Note that None is used to indicate that request did not include any
    # ha information was provided!
    requested_ha_enabled = router.pop(
        ha.ENABLED, True if requested_ha_details or auto_enable_ha is True
        else None)
    res = {}
    ha_currently_enabled = current_router.get(ha.ENABLED, False)
    # Note: must check for 'is True' as None implies attribute not given
    if requested_ha_enabled is True or ha_currently_enabled is True:
        if not cfg.CONF.ha.ha_support_enabled:
            raise ha.HADisabled()
        curr_ha_details = current_router.get(ha.DETAILS, {})
        if ha.TYPE in requested_ha_details:
            requested_ha_type = requested_ha_details[ha.TYPE]
            if (ha.TYPE in curr_ha_details and
                    requested_ha_type != curr_ha_details[ha.TYPE]):
                raise ha.HATypeCannotBeChanged()
            elif requested_ha_type in cfg.CONF.ha.disabled_ha_mechanisms:
                raise ha.HADisabledHAType(ha_type=requested_ha_type)
    if requested_ha_enabled:
        res[ha.ENABLED] = requested_ha_enabled
        if requested_ha_details:
            res[ha.DETAILS] = requested_ha_details
    elif requested_ha_enabled is False:
        res[ha.ENABLED] = False
    return res
def _ensure_update_ha_compliant(self, router, current_router, r_hd_binding_db)
To be called in update_router() BEFORE router has been updated in DB.
3.684659
3.713123
0.992334
def _teardown_redundancy_router_gw_connectivity(self, context, router,
                                                router_db,
                                                plugging_driver):
    """To be called in update_router() if the router gateway is to change
    BEFORE router has been updated in DB.
    """
    if not router[ha.ENABLED]:
        # No HA currently enabled so we're done
        return
    e_context = context.elevated()
    # since gateway is about to change the ha group for the current gateway
    # is removed, a new one will be created later
    self._delete_ha_group(e_context, router_db.gw_port_id)
    # teardown connectivity for the gw ports on the redundancy routers
    # and remove those ports as new ones will be created later
    rr_ids = []
    for r_b_db in router_db.redundancy_bindings:
        if plugging_driver is not None:
            plugging_driver.teardown_logical_port_connectivity(
                e_context, r_b_db.redundancy_router.gw_port,
                r_b_db.redundancy_router.hosting_info.hosting_device_id)
        self._update_router_no_notify(
            e_context, r_b_db.redundancy_router_id,
            {'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
        rr_ids.append(r_b_db.redundancy_router_id)
    self.notify_routers_updated(e_context, rr_ids)
def _teardown_redundancy_router_gw_connectivity(self, context, router, router_db, plugging_driver)
To be called in update_router() if the router gateway is to change BEFORE router has been updated in DB .
4.458307
4.309701
1.034482
def _add_redundancy_routers(self, context, start_index, stop_index,
                            user_visible_router, ports=None,
                            ha_settings_db=None, create_ha_group=True):
    """Creates a redundancy router and its interfaces on the specified
    subnets.

    One redundancy router is created per index in
    [start_index, stop_index), each bound to the user visible router.
    """
    priority = (DEFAULT_MASTER_PRIORITY +
                (start_index - 1) * PRIORITY_INCREASE_STEP)
    r = copy.deepcopy(user_visible_router)
    # No tenant_id so redundancy routers are hidden from user
    r['tenant_id'] = ''
    name = r['name']
    redundancy_r_ids = []
    for i in range(start_index, stop_index):
        del r['id']
        # We don't replicate the user visible router's routes, instead
        # they are populated to redundancy routers for get router(s) ops
        r.pop('routes', None)
        # Redundancy routers will never have a route spec themselves
        # The redundancy routers must have HA disabled
        r[ha.ENABLED] = False
        r['name'] = name + REDUNDANCY_ROUTER_SUFFIX + str(i)
        # set role so that purpose of this router can be easily determined
        r[routerrole.ROUTER_ROLE_ATTR] = ROUTER_ROLE_HA_REDUNDANCY
        gw_info = r[EXTERNAL_GW_INFO]
        if gw_info and gw_info['external_fixed_ips']:
            # Ensure ip addresses are not specified as they cannot be
            # same as visible router's ip addresses.
            for e_fixed_ip in gw_info['external_fixed_ips']:
                e_fixed_ip.pop('ip_address', None)
        r = self.create_router(context, {'router': r})
        LOG.debug("Created redundancy router %(index)d with router id "
                  "%(r_id)s", {'index': i, 'r_id': r['id']})
        priority += PRIORITY_INCREASE_STEP
        r_b_b = RouterRedundancyBinding(
            redundancy_router_id=r['id'],
            priority=priority,
            user_router_id=user_visible_router['id'])
        context.session.add(r_b_b)
        redundancy_r_ids.append(r['id'])
    for port_db in ports or []:
        port = self._core_plugin._make_port_dict(port_db)
        self._add_redundancy_router_interfaces(
            context, user_visible_router, None, port,
            redundancy_r_ids, ha_settings_db, create_ha_group)
def _add_redundancy_routers(self, context, start_index, stop_index, user_visible_router, ports=None, ha_settings_db=None, create_ha_group=True)
Creates a redundancy router and its interfaces on the specified subnets.
3.98582
4.002322
0.995877
def _remove_redundancy_routers(self, context, router_ids, ports,
                               delete_ha_groups=False):
    """Deletes all interfaces of the specified redundancy routers and
    then the redundancy routers themselves.
    """
    subnets_info = [{'subnet_id': port['fixed_ips'][0]['subnet_id']}
                    for port in ports]
    for r_id in router_ids:
        for i in range(len(subnets_info)):
            self.remove_router_interface(context, r_id, subnets_info[i])
            LOG.debug("Removed interface on %(s_id)s to redundancy router "
                      "with %(r_id)s",
                      {'s_id': ports[i]['network_id'], 'r_id': r_id})
            # There is only one ha group per network so only delete once
            if delete_ha_groups and r_id == router_ids[0]:
                self._delete_ha_group(context, ports[i]['id'])
        self.delete_router(context, r_id)
        LOG.debug("Deleted redundancy router %s", r_id)
def _remove_redundancy_routers(self, context, router_ids, ports, delete_ha_groups=False)
Deletes all interfaces of the specified redundancy routers and then the redundancy routers themselves.
2.538098
2.467406
1.02865
def _delete_redundancy_routers(self, context, router_db):
    """To be called in delete_router() BEFORE router has been deleted
    in DB. The router should have no interfaces.
    """
    e_context = context.elevated()
    for binding in router_db.redundancy_bindings:
        self.delete_router(e_context, binding.redundancy_router_id)
        LOG.debug("Deleted redundancy router %s",
                  binding.redundancy_router_id)
    if router_db.gw_port_id:
        # delete ha settings and extra port for gateway (VIP) port
        self._delete_ha_group(e_context, router_db.gw_port_id)
def _delete_redundancy_routers(self, context, router_db)
To be called in delete_router() BEFORE router has been deleted in DB. The router should have not interfaces.
3.4351
3.553553
0.966666
def _update_redundancy_router_interfaces(self, context, router, port,
                                         modified_port_data,
                                         redundancy_router_ids=None,
                                         ha_settings_db=None):
    """To be called when the router interfaces are updated, like in the
    case of change in port admin_state_up status.

    Propagates modified_port_data to the corresponding ports on all
    redundancy routers and to the hidden (HA) port.
    """
    # NOTE(review): the redundancy_router_ids parameter is currently
    # unused here — TODO confirm whether callers rely on it.
    router_id = router['id']
    if ha_settings_db is None:
        ha_settings_db = self._get_ha_settings_by_router_id(context,
                                                            router_id)
    if ha_settings_db is None:
        # Router is not HA enabled; nothing to propagate.
        return
    e_context = context.elevated()
    rr_ids = self._get_redundancy_router_ids(e_context, router_id)
    port_info_list = self._core_plugin.get_ports(
        e_context, filters={'device_id': rr_ids,
                            'network_id': [port['network_id']]},
        fields=['device_id', 'id'])
    for port_info in port_info_list:
        self._core_plugin.update_port(e_context, port_info['id'],
                                      modified_port_data)
    self._update_hidden_port(e_context, port['id'], modified_port_data)
def _update_redundancy_router_interfaces(self, context, router, port, modified_port_data, redundancy_router_ids=None, ha_settings_db=None)
To be called when the router interfaces are updated, like in the case of change in port admin_state_up status
2.206857
2.225875
0.991456
def _remove_redundancy_router_interfaces(self, context, router_id,
                                         old_port):
    """To be called in delete_router_interface() BEFORE interface has
    been removed from router in DB.

    Removes the matching interface (same subnet) from every redundancy
    router and deletes the HA group of the old port.
    """
    ha_settings = self._get_ha_settings_by_router_id(context, router_id)
    if ha_settings is None or old_port is None:
        return
    e_context = context.elevated()
    rr_ids = self._get_redundancy_router_ids(e_context, router_id)
    port_info_list = self._core_plugin.get_ports(
        e_context, filters={'device_id': rr_ids,
                            'network_id': [old_port['network_id']]},
        fields=['device_id', 'fixed_ips', 'id'])
    subnet_id = old_port['fixed_ips'][0]['subnet_id']
    for port_info in port_info_list:
        # Only remove the interface that sits on the same subnet as the
        # interface being deleted from the user visible router.
        if port_info['fixed_ips'][0]['subnet_id'] == subnet_id:
            interface_info = {'port_id': port_info['id']}
            self.remove_router_interface(e_context,
                                         port_info['device_id'],
                                         interface_info)
    self._delete_ha_group(e_context, old_port['id'])
def _remove_redundancy_router_interfaces(self, context, router_id, old_port)
To be called in delete_router_interface() BEFORE interface has been removed from router in DB.
2.10622
2.082833
1.011229
if ha_settings_db is None: ha_settings_db = self._get_ha_settings_by_router_id(context, router_id) if ha_settings_db is None: return e_context = context.elevated() router_ids = [] for r_id in (redundancy_router_ids or self._get_redundancy_router_ids(e_context, router_id)): router_ids.append(r_id) return router_ids
def _redundancy_routers_for_floatingip( self, context, router_id, redundancy_router_ids=None, ha_settings_db=None)
To be called in update_floatingip() to get the redundant router ids.
2.202019
2.097862
1.049649
def _populate_ha_information(self, context, router):
    """To be called when router information, including router interface
    list, (for the l3_cfg_agent) has been collected so it is extended
    with ha information.
    """
    r_r_b = self._get_redundancy_router_bindings(
        context, redundancy_router_id=router['id'])
    if not r_r_b:
        if router[ha.ENABLED]:
            # The router is a user visible router with HA enabled.
            user_router_id = router['id']
            fips = []
        else:
            # The router is a user visible router with HA disabled.
            # Nothing more to do here.
            return
    else:
        # The router is a redundancy router.
        # Need to fetch floatingip configurations from user visible router
        # so they can be added to the redundancy routers.
        user_router_id = r_r_b[0].user_router_id
        fips = self.get_floatingips(context,
                                    {'router_id': [user_router_id]})
    if router['id'] != user_router_id:
        # We add the HA settings from user visible router to
        # its redundancy routers.
        user_router_db = self._get_router(context, user_router_id)
        self._extend_router_dict_ha(router, user_router_db)
    # The interfaces of the user visible router must use the
    # IP configuration of the extra ports in the HA groups.
    hag_dbs = self._get_subnet_id_indexed_ha_groups(context,
                                                    user_router_id)
    e_context = context.elevated()
    if router.get('gw_port'):
        modified_interfaces = []
        interface_port = self._populate_port_ha_information(
            e_context, router['gw_port'], router['id'], hag_dbs,
            user_router_id, modified_interfaces)
        if not interface_port:
            # The router has a gw_port but cannot find the port info yet
            # so mark this router to have incomplete info and bail.
            # The cfg_agent puts this in the updated_routers to ask again.
            router['status'] = cisco_constants.ROUTER_INFO_INCOMPLETE
            return
        if modified_interfaces:
            router['gw_port'] = interface_port
    modified_interfaces = []
    for itfc in router.get(bc.constants.INTERFACE_KEY, []):
        interface_port = self._populate_port_ha_information(
            e_context, itfc, router['id'], hag_dbs, user_router_id,
            modified_interfaces)
        if not interface_port:
            # the router has interfaces but cannot find the port info yet
            # so mark this router to have incomplete info and bail
            # the cfg_agent will put this in the updated_list to ask again
            router['status'] = cisco_constants.ROUTER_INFO_INCOMPLETE
            return
    if modified_interfaces:
        router[bc.constants.INTERFACE_KEY] = modified_interfaces
    if fips:
        router[bc.constants.FLOATINGIP_KEY] = fips
def _populate_ha_information(self, context, router)
To be called when router information, including router interface list, (for the l3_cfg_agent) has been collected so it is extended with ha information.
3.484005
3.411952
1.021118
def _create_hidden_port(self, context, network_id, device_id, fixed_ips,
                        port_type=DEVICE_OWNER_ROUTER_INTF):
    """Creates port used specially for HA purposes.

    The port has an empty tenant_id so it is hidden from users.
    """
    port = {'port': {
        'tenant_id': '',  # intentionally not set
        'network_id': network_id,
        'mac_address': ATTR_NOT_SPECIFIED,
        'fixed_ips': fixed_ips,
        'device_id': device_id,
        'device_owner': port_type,
        'admin_state_up': True,
        'name': ''}}
    # The dns-integration extension requires the dns_name attribute on
    # port create requests when it is loaded.
    if extensions.is_extension_supported(self._core_plugin,
                                         "dns-integration"):
        port['port'].update(dns_name='')
    core_plugin = bc.get_plugin()
    return core_plugin.create_port(context, port)
def _create_hidden_port(self, context, network_id, device_id, fixed_ips, port_type=DEVICE_OWNER_ROUTER_INTF)
Creates port used specially for HA purposes.
2.9082
2.864727
1.015176
def get_router_for_floatingip(self, context, internal_port,
                              internal_subnet, external_network_id):
    """We need to over-load this function so that we only return the
    user visible router and never its redundancy routers (as they never
    have floatingips associated with them).
    """
    gw_port = orm.aliased(models_v2.Port, name="gw_port")
    # Routers with an interface on the internal network and a gateway on
    # the external network.
    routerport_qry = context.session.query(
        RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
        models_v2.Port, models_v2.IPAllocation).filter(
        models_v2.Port.network_id == internal_port['network_id'],
        RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS),
        models_v2.IPAllocation.subnet_id == internal_subnet['id']
    ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
        gw_port.network_id == external_network_id,
        gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW
    ).distinct()
    # Ensure that redundancy routers (in a ha group) are not returned,
    # since only the user visible router should have floatingips.
    # This can be done by checking that the id of routers does not
    # appear in the 'redundancy_router_id' column in the
    # 'cisco_router_redundancy_bindings' table.
    routerport_qry = routerport_qry.outerjoin(
        RouterRedundancyBinding,
        RouterRedundancyBinding.redundancy_router_id ==
        RouterPort.router_id)
    routerport_qry = routerport_qry.filter(
        RouterRedundancyBinding.redundancy_router_id == expr.null())
    # Prefer the router whose interface is the subnet gateway; otherwise
    # fall back to the first candidate.
    first_router_id = None
    for router_id, interface_ip in routerport_qry:
        if interface_ip == internal_subnet['gateway_ip']:
            return router_id
        if not first_router_id:
            first_router_id = router_id
    if first_router_id:
        return first_router_id
    raise l3_exceptions.ExternalGatewayForFloatingIPNotFound(
        subnet_id=internal_subnet['id'],
        external_network_id=external_network_id,
        port_id=internal_port['id'])
def get_router_for_floatingip(self, context, internal_port, internal_subnet, external_network_id)
We need to over-load this function so that we only return the user visible router and never its redundancy routers (as they never have floatingips associated with them).
2.687536
2.495017
1.077161
def allocate_fw_dev(self, fw_id):
    """Allocate the first firewall device that has spare quota.

    Returns (driver_dict, mgmt_ip) on success, (None, None) when every
    device is at capacity.
    """
    for entry in self.res.values():
        if entry.get('used') >= entry.get('quota'):
            continue
        entry['used'] += 1
        entry['fw_id_lst'].append(fw_id)
        return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
def allocate_fw_dev(self, fw_id)
Allocate firewall device. Allocate the first Firewall device which has resources available.
3.916985
3.782016
1.035687
def populate_fw_dev(self, fw_id, mgmt_ip, new):
    """Populate the scheduler cache for one firewall after a restart.

    Finds the device entry matching mgmt_ip; when 'new' is set the
    device usage is re-counted and the (driver_dict, mgmt_ip) pair is
    returned. Returns (None, None) otherwise.
    """
    for entry in self.res.values():
        if mgmt_ip != entry.get('mgmt_ip'):
            continue
        if new:
            entry['used'] = entry.get('used') + 1
            entry['fw_id_lst'].append(fw_id)
            return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
def populate_fw_dev(self, fw_id, mgmt_ip, new)
Populate the class after a restart.
3.668563
3.568398
1.02807
def get_fw_dev_map(self, fw_id):
    """Return the object dict and mgmt ip for a firewall.

    Returns (None, None) when no device holds the firewall.
    """
    for entry in self.res.values():
        if fw_id in entry.get('fw_id_lst'):
            return entry.get('obj_dict'), entry.get('mgmt_ip')
    return None, None
def get_fw_dev_map(self, fw_id)
Return the object dict and mgmt ip for a firewall.
5.600667
3.661863
1.529459
def deallocate_fw_dev(self, fw_id):
    """Release the firewall resource back to its device."""
    for entry in self.res.values():
        if fw_id in entry.get('fw_id_lst'):
            entry['used'] -= 1
            entry.get('fw_id_lst').remove(fw_id)
            return
def deallocate_fw_dev(self, fw_id)
Release the firewall resource.
3.008141
2.854995
1.053642
def populate_local_sch_cache(self, fw_dict):
    """Populate the local scheduler cache from the FW DB after restart.

    Entries without a management IP are skipped; a firewall whose device
    status is 'SUCCESS' is re-counted as allocated on its device.
    """
    for fw_id, fw_data in fw_dict.items():
        mgmt_ip = fw_data.get('fw_mgmt_ip')
        if mgmt_ip is None:
            continue
        is_new = fw_data.get('device_status') == 'SUCCESS'
        drvr_dict, mgmt_ip = self.sched_obj.populate_fw_dev(
            fw_id, mgmt_ip, is_new)
        if drvr_dict is None or mgmt_ip is None:
            LOG.info("Pop cache for FW sch: drvr_dict or mgmt_ip "
                     "is None")
def populate_local_sch_cache(self, fw_dict)
Populate the local cache from FW DB after restart.
3.81791
3.685677
1.035878
def drvr_initialize(self, cfg):
    """Initialize the driver routines.

    Builds a per-device config dict (mgmt ip plus any configured
    credentials/interfaces at the same list position) and hands it to
    each driver's initialize().
    """
    for idx, ip in enumerate(self.obj_dict):
        cfg_dict = {'mgmt_ip_addr': ip}
        if self.user_list is not None:
            cfg_dict['user'] = self.user_list[idx]
        if self.pwd_list is not None:
            cfg_dict['pwd'] = self.pwd_list[idx]
        if self.interface_in_list is not None:
            cfg_dict['interface_in'] = self.interface_in_list[idx]
        if self.interface_out_list is not None:
            cfg_dict['interface_out'] = self.interface_out_list[idx]
        self.obj_dict.get(ip).get('drvr_obj').initialize(cfg_dict)
def drvr_initialize(self, cfg)
Initialize the driver routines.
2.171919
2.185774
0.993661
def populate_event_que(self, que_obj):
    """Pass the event queue object to every driver.

    Drivers use the queue to send router events to the event handler.
    """
    for entry in self.obj_dict.values():
        entry.get('drvr_obj').populate_event_que(que_obj)
def populate_event_que(self, que_obj)
Populates the event queue object. This is for sending router events to event handler.
4.402802
4.447738
0.989897
def populate_dcnm_obj(self, dcnm_obj):
    """Pass the DCNM object to every driver."""
    for entry in self.obj_dict.values():
        entry.get('drvr_obj').populate_dcnm_obj(dcnm_obj)
def populate_dcnm_obj(self, dcnm_obj)
Populates the DCNM object.
3.833513
3.786177
1.012502
def is_device_virtual(self):
    """Returns if the device is physical or virtual.

    There is currently no way to pin a request to a particular device,
    so the answer from the first driver is returned (None when no
    drivers are configured).
    """
    for ip in self.obj_dict:
        drvr_obj = self.obj_dict.get(ip).get('drvr_obj')
        ret = drvr_obj.is_device_virtual()
        # No way to pin a device as of now, so return the first
        # TODO(padkrish)
        return ret
def is_device_virtual(self)
Returns if the device is physical or virtual.
8.706341
7.798263
1.116446
def create_fw_device(self, tenant_id, fw_id, data):
    """Create the Firewall on an allocated device.

    Allocates a device slot, records its mgmt IP in the FW DB and asks
    the driver to create the firewall; the slot is released again when
    the driver fails.
    """
    drvr_dict, mgmt_ip = self.sched_obj.allocate_fw_dev(fw_id)
    if drvr_dict is None or mgmt_ip is None:
        return False
    self.update_fw_db_mgmt_ip(fw_id, mgmt_ip)
    ret = drvr_dict.get('drvr_obj').create_fw(tenant_id, data)
    if not ret:
        # Creation failed: give the device slot back.
        self.sched_obj.deallocate_fw_dev(fw_id)
    return ret
def create_fw_device(self, tenant_id, fw_id, data)
Creates the Firewall.
3.353672
3.278456
1.022942
def delete_fw_device(self, tenant_id, fw_id, data):
    """Delete the Firewall and release its device slot on success."""
    drvr_dict, _mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
    ret = drvr_dict.get('drvr_obj').delete_fw(tenant_id, data)
    # FW DB gets deleted, so no need to remove the MGMT IP
    if ret:
        self.sched_obj.deallocate_fw_dev(fw_id)
    return ret
def delete_fw_device(self, tenant_id, fw_id, data)
Deletes the Firewall.
6.456001
6.534622
0.987968
def modify_fw_device(self, tenant_id, fw_id, data):
    """Modify the firewall cfg via its device's driver."""
    drvr_dict, _mgmt_ip = self.sched_obj.get_fw_dev_map(fw_id)
    return drvr_dict.get('drvr_obj').modify_fw(tenant_id, data)
def modify_fw_device(self, tenant_id, fw_id, data)
Modifies the firewall cfg.
7.860921
7.851231
1.001234
def network_create_notif(self, tenant_id, tenant_name, cidr):
    """Notification for Network create.

    The FW ID is unknown here, so every driver instance is invoked and
    each decides whether the event applies to it.
    """
    for ip, entry in self.obj_dict.items():
        ret = entry.get('drvr_obj').network_create_notif(
            tenant_id, tenant_name, cidr)
        LOG.info("Driver with IP %(ip)s return %(ret)s",
                 {'ip': ip, 'ret': ret})
def network_create_notif(self, tenant_id, tenant_name, cidr)
Notification for Network create. Since FW ID not present, it's not possible to know which FW instance to call. So, calling everyone, each instance will figure out if it applies to them.
3.406245
3.585423
0.950026
def network_delete_notif(self, tenant_id, tenant_name, net_id):
    """Notification for Network delete.

    The FW ID is unknown here, so every driver instance is invoked and
    each decides whether the event applies to it.
    """
    for ip, entry in self.obj_dict.items():
        ret = entry.get('drvr_obj').network_delete_notif(
            tenant_id, tenant_name, net_id)
        LOG.info("Driver with IP %(ip)s return %(ret)s for network "
                 "delete notification", {'ip': ip, 'ret': ret})
def network_delete_notif(self, tenant_id, tenant_name, net_id)
Notification for Network delete. Since FW ID not present, it's not possible to know which FW instance to call. So, calling everyone, each instance will figure out if it applies to them.
3.371755
3.541698
0.952016
def get_cfg_router_ids(self, context, host, router_ids=None,
                       hosting_device_ids=None):
    """Returns IDs of routers scheduled to l3 agent on <host>."""
    plugin = self._l3plugin
    return plugin.cfg_list_router_ids_on_host(context, host, router_ids,
                                              hosting_device_ids)
def get_cfg_router_ids(self, context, host, router_ids=None, hosting_device_ids=None)
Returns IDs of routers scheduled to l3 agent on <host>
5.293848
4.909015
1.078393
def cfg_sync_routers(self, context, host, router_ids=None,
                     hosting_device_ids=None):
    """Sync routers according to filters to a specific Cisco cfg agent.

    :param context: contains user information
    :param host: originator of callback
    :param router_ids: list of router ids to return information about
    :param hosting_device_ids: list of hosting device ids to get
        routers for.
    :returns: a list of routers with their hosting devices, interfaces
        and floating_ips
    """
    adm_context = bc.context.get_admin_context()
    try:
        routers = (
            self._l3plugin.list_active_sync_routers_on_hosting_devices(
                adm_context, host, router_ids, hosting_device_ids))
    except AttributeError:
        # Plugin without the sync API: report no routers rather than fail.
        routers = []
    LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
              {'agt': host,
               'routers': jsonutils.dumps(routers, indent=5)})
    return routers
def cfg_sync_routers(self, context, host, router_ids=None, hosting_device_ids=None)
Sync routers according to filters to a specific Cisco cfg agent. :param context: contains user information :param host: originator of callback :param router_ids: list of router ids to return information about :param hosting_device_ids: list of hosting device ids to get routers for. :returns: a list of routers with their hosting devices, interfaces and floating_ips
4.30846
4.29711
1.002641
def update_floatingip_statuses_cfg(self, context, router_id,
                                   fip_statuses):
    """Update operational status for one or several floating IPs.

    This is called by Cisco cfg agent to update the status of one or
    several floatingips.

    :param context: contains user information
    :param router_id: id of router associated with the floatingips
    :param fip_statuses: dict with floatingip_id as key and status as
        value
    """
    with context.session.begin(subtransactions=True):
        for (floatingip_id, status) in six.iteritems(fip_statuses):
            LOG.debug("New status for floating IP %(floatingip_id)s: "
                      "%(status)s", {'floatingip_id': floatingip_id,
                                     'status': status})
            try:
                self._l3plugin.update_floatingip_status(
                    context, floatingip_id, status)
            except l3_exceptions.FloatingIPNotFound:
                LOG.debug("Floating IP: %s no longer present.",
                          floatingip_id)
        # Find all floating IPs known to have been the given router
        # for which an update was not received. Set them DOWN mercilessly
        # This situation might occur for some asynchronous backends if
        # notifications were missed
        known_router_fips = self._l3plugin.get_floatingips(
            context, {'last_known_router_id': [router_id]})
        # Consider only floating ips which were disassociated in the API
        fips_to_disable = (fip['id'] for fip in known_router_fips
                           if not fip['router_id'])
        for fip_id in fips_to_disable:
            LOG.debug("update_fip_statuses: disable: %s", fip_id)
            self._l3plugin.update_floatingip_status(
                context, fip_id, bc.constants.FLOATINGIP_STATUS_DOWN)
def update_floatingip_statuses_cfg(self, context, router_id, fip_statuses)
Update operational status for one or several floating IPs. This is called by Cisco cfg agent to update the status of one or several floatingips. :param context: contains user information :param router_id: id of router associated with the floatingips :param router_id: dict with floatingip_id as key and status as value
3.420755
3.606984
0.94837
def update_port_statuses_cfg(self, context, port_ids, status):
    """Update the operational statuses of a list of router ports.

    This is called by the Cisco cfg agent.

    :param context: contains user information
    :param port_ids: list of ids of all the ports for the given status
    :param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
    """
    l3plugin = self._l3plugin
    l3plugin.update_router_port_statuses(context, port_ids, status)
def update_port_statuses_cfg(self, context, port_ids, status)
Update the operational statuses of a list of router ports. This is called by the Cisco cfg agent to update the status of a list of ports. :param context: contains user information :param port_ids: list of ids of all the ports for the given status :param status: PORT_STATUS_ACTIVE/PORT_STATUS_DOWN.
5.063385
5.448362
0.929341
def get_mysql_credentials(cfg_file):
    """Get the credentials and database name from options in config file.

    Parses the 'connection' option of the [dfa_mysql] section, which has
    the form:
        http://username:password@host/databasename?characterset=encoding
    Returns a (user, password, host, db_name, charset) tuple, or exits
    the process with status 1 on any failure.
    """
    try:
        parser = ConfigParser.ConfigParser()
        # 'with' guarantees the file is closed exactly once on every
        # path (the original closed it manually and could double-close).
        with open(cfg_file) as cfg_fp:
            # read_file() is the Python 3 name; readfp() was removed in
            # Python 3.12, so fall back only for old interpreters.
            read = getattr(parser, 'read_file', None) or parser.readfp
            read(cfg_fp)
    except ConfigParser.NoOptionError:
        print('Failed to find mysql connections credentials.')
        sys.exit(1)
    except IOError:
        # BUG FIX: this used print('ERROR: Cannot open %s.', cfg_file),
        # which prints a tuple instead of interpolating the filename.
        print('ERROR: Cannot open %s.' % cfg_file)
        sys.exit(1)

    value = parser.get('dfa_mysql', 'connection')
    try:
        # Find location of pattern in connection parameter as shown below:
        # http://username:password@host/databasename?characterset=encoding
        sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
        # indices[0] -> '://', indices[1] -> '@',
        # indices[2] -> '/',  indices[3] -> '?'
        indices = [sobj.start(1), sobj.start(2), sobj.start(3),
                   sobj.start(4)]
        # Get the credentials
        cred = value[indices[0] + 3:indices[1]].split(':')
        # Get the host name
        host = value[indices[1] + 1:indices[2]]
        # Get the database name
        db_name = value[indices[2] + 1:indices[3]]
        # Get the character encoding
        charset = value[indices[3] + 1:].split('=')[1]
        return cred[0], cred[1], host, db_name, charset
    except (ValueError, IndexError, AttributeError):
        print('Failed to find mysql connections credentials.')
        sys.exit(1)
def get_mysql_credentials(cfg_file)
Get the credentials and database name from options in config file.
2.899332
2.861894
1.013081
def modify_conf(cfgfile, service_name, outfn):
    """Modify config file neutron and keystone to include enabler options.

    For each option listed in service_options[service_name]:
      * if present with a different value, replace it (or append the
        value for list-valued options),
      * if absent, insert it right after its section header.
    The resulting file is written to outfn. Exits the process on errors.
    """
    if not cfgfile or not outfn:
        print('ERROR: There is no config file.')
        sys.exit(0)
    options = service_options[service_name]
    with open(cfgfile, 'r') as cf:
        lines = cf.readlines()
    for opt in options:
        op = opt.get('option')
        res = [line for line in lines if line.startswith(op)]
        if len(res) > 1:
            print('ERROR: There are more than one %s option.' % res)
            sys.exit(0)
        if res:
            (op, sep, val) = (res[0].strip('\n').replace(' ', '').
                              partition('='))
            new_val = None
            if opt.get('is_list'):
                # Value for this option can contain list of values.
                # Append the value if it does not exist.
                if not any(opt.get('value') == value
                           for value in val.split(',')):
                    new_val = ','.join((val, opt.get('value')))
            else:
                if val != opt.get('value'):
                    new_val = opt.get('value')
            if new_val:
                opt_idx = lines.index(res[0])
                # The setting is different, replace it with new one.
                lines.pop(opt_idx)
                lines.insert(opt_idx, '='.join((opt.get('option'),
                                                new_val + '\n')))
        else:
            # Option does not exist. Add the option.
            try:
                sec_idx = lines.index('[' + opt.get('section') + ']\n')
                lines.insert(sec_idx + 1, '='.join(
                    (opt.get('option'), opt.get('value') + '\n')))
            except ValueError:
                print('Invalid %s section name.' % opt.get('section'))
                sys.exit(0)
    with open(outfn, 'w') as fwp:
        # writelines avoids the quadratic string concatenation the
        # original per-line '+=' loop performed.
        fwp.writelines(lines)
def modify_conf(cfgfile, service_name, outfn)
Modify config file neutron and keystone to include enabler options.
2.709267
2.708077
1.00044
cctxt = self.client.prepare() return cctxt.call(context, 'get_all_hosting_devices', host=self.host)
def get_all_hosting_devices(self, context)
Get a list of all hosting devices.
2.718031
2.361391
1.15103
cctxt = self.client.prepare() return cctxt.call(context, 'cfg_sync_all_hosted_routers', host=self.host)
def get_all_hosted_routers(self, context)
Make a remote process call to retrieve the sync data for routers that have been scheduled to a hosting device. :param context: session context
4.5016
4.601696
0.978248
cctxt = self.client.prepare() return cctxt.call(context, 'get_hardware_router_type_id', host=self.host)
def get_hardware_router_type_id(self, context)
Get the ID for the ASR1k hardware router type.
2.678913
2.473252
1.083154
args = jsonutils.loads(msg) when = args.get('when') agent = args.get('agent') # The configurations in here, only used once when creating entry # for an agent in DB for the first time. configurations = {'uplink': ''} LOG.debug('heartbeat received: %(time)s - %(agent)s', ( {'time': when, 'agent': agent})) if self.obj.neutron_event: self.obj.neutron_event.create_rpc_client(agent) # Other option is to add the event to the queue for processig it later. self.obj.update_agent_status(agent, when) # Update the agents database. agent_info = dict(timestamp=utils.utc_time(when), host=agent, config=jsonutils.dumps(configurations)) self.obj.update_agent_db(agent_info)
def heartbeat(self, context, msg)
Process heartbeat message from agents on compute nodes.
7.119282
7.037612
1.011605
LOG.debug('request_uplink_info from %(agent)s', {'agent': agent}) # Add the request into queue for processing. event_type = 'agent.request.uplink' payload = {'agent': agent} timestamp = time.ctime() data = (event_type, payload) pri = self.obj.PRI_LOW_START + 1 self.obj.pqueue.put((pri, timestamp, data)) LOG.debug('Added request uplink info into queue.') return 0
def request_uplink_info(self, context, agent)
Process uplink message from an agent.
5.580308
5.43975
1.025839
args = jsonutils.loads(msg) macaddr = args.get('mac') ipaddr = args.get('ip') LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', ( {'mac': macaddr, 'ip': ipaddr})) # Add the request into queue for processing. event_type = 'cli.static_ip.set' payload = {'mac': macaddr, 'ip': ipaddr} timestamp = time.ctime() data = (event_type, payload) pri = self.obj.PRI_LOW_START self.obj.pqueue.put((pri, timestamp, data)) LOG.debug('Added request to add static ip into queue.') return 0
def set_static_ip_address(self, context, msg)
Process request for setting rules in iptables. In cases that static ip address is assigned for a VM, it is needed to update the iptables rule for that address.
4.853479
5.033251
0.964283
args = jsonutils.loads(msg) agent = context.get('agent') port_id = args.get('port_uuid') result = args.get('result') LOG.debug('update_vm_result received from %(agent)s: ' '%(port_id)s %(result)s', {'agent': agent, 'port_id': port_id, 'result': result}) # Add the request into queue for processing. event_type = 'agent.vm_result.update' payload = {'port_id': port_id, 'result': result} timestamp = time.ctime() data = (event_type, payload) # TODO(nlahouti) use value defined in constants pri = self.obj.PRI_LOW_START + 10 self.obj.pqueue.put((pri, timestamp, data)) LOG.debug('Added request vm result update into queue.') return 0
def update_vm_result(self, context, msg)
Update VM's result field in the DB. The result reflects the success of failure of operation when an agent processes the vm info.
5.169995
5.129924
1.007811