code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def fw_retry_failures_delete(self):
    """This routine is called for retrying the delete cases.

    Walks every tenant known to this helper; for each tenant whose FW
    record is in the DELETE_INIT state, re-drives the fabric/device
    delete path.  Errors for one tenant are logged and do not stop the
    scan of the remaining tenants.
    """
    for tenant_id in self.fwid_attr:
        try:
            # Serialize against concurrent create/delete for this tenant.
            with self.fwid_attr[tenant_id].mutex_lock:
                # For both create and delete case
                fw_data = self.get_fw_by_tenant_id(tenant_id)
                if fw_data is None:
                    LOG.info("No FW for tenant %s", tenant_id)
                    continue
                # 'result' is stored as "STATE(detail)"; keep the state part.
                result = fw_data.get('result').split('(')[0]
                if result == fw_constants.RESULT_FW_DELETE_INIT:
                    fw_dict = self.fwid_attr[tenant_id].get_fw_dict()
                    # This means a restart has happened before the FW is
                    # completely deleted
                    if not fw_dict:
                        # Need to fill fw_dict from fw_data
                        fw_dict = self.fill_fw_dict_from_db(fw_data)
                    self.retry_failure_fab_dev_delete(tenant_id, fw_data,
                                                      fw_dict)
        except Exception as exc:
            LOG.error("Exception in retry failure delete %s", str(exc))
4.221718
4.172724
1.011742
def fw_retry_failures(self):
    """Top level retry routine called.

    Re-drives both the create and the delete retry paths; does nothing
    until firewall initialization has completed.
    """
    if self.fw_init:
        try:
            self.fw_retry_failures_create()
            self.fw_retry_failures_delete()
        except Exception as exc:
            LOG.error("Exception in retry failures %s", str(exc))
4.271178
4.08871
1.044627
linespec += ".*" start_points = [] for item in self._indent_list: match = re.search(linespec, item[1]) if match: entry = (item, self._indent_list.index(item)) start_points.append(entry) return start_points
def _find_starts(self, linespec)
Finds the start points. Start points matching the linespec regex are returned as list in the following format: [(item, index), (item, index).....
3.599809
3.787194
0.950521
def find_lines(self, linespec):
    """Find lines that match the linespec regex.

    Returns the matched portion (group 0) of every config line that
    matches ``linespec`` followed by anything.
    """
    pattern = re.compile(linespec + ".*")
    hits = (pattern.search(line) for line in self.cfg)
    return [hit.group(0) for hit in hits if hit]
3.37815
3.211756
1.051808
def find_objects(self, linespec):
    """Find lines that match the linespec regex.

    :param linespec: regular expression of line to match
    :return: list of LineItem objects, each carrying its immediate
             (one-level-deep) children
    """
    # Note(asr1kteam): In this code we are only adding children one-level
    # deep to a given parent (linespec), as that satisfies the IOS conf
    # parsing.
    # Note(asr1kteam): Not tested with tabs in the config. Currently used
    # with IOS config where we haven't seen tabs, but may be needed for a
    # more general case.
    res = []
    self._build_indent_based_list()
    # Each start is ((indent, text), index) from _find_starts.
    for item, index in self._find_starts(linespec):
        parent = LineItem(item[1])
        next_ident_level = self._find_next_indent_level(index)
        if next_ident_level:
            # We start iterating from the next element
            # NOTE(review): the loop variable deliberately shadows the
            # outer 'item'; entries at exactly the next indent level are
            # children, deeper ones are skipped, and the first entry at
            # the parent's level (or shallower) terminates the scan.
            for item in self._indent_list[(index + 1):]:
                if item[0] == next_ident_level:
                    parent.add_children(LineItem(item[1]))
                elif item[0] > next_ident_level:
                    # We skip higher indent
                    continue
                else:
                    # Indent level is same or lesser than item
                    break
        res.append(parent)
    return res
8.381076
8.610109
0.973399
def find_children(self, linespec):
    """Find lines and immediate children that match the linespec regex.

    :param linespec: regular expression of line to match
    :returns: list of lines. These correspond to the lines that were
              matched and their immediate children
    """
    lines = []
    for parent in self.find_objects(linespec):
        lines.append(parent.line)
        for child in parent.children:
            lines.append(child.line)
    return lines
3.959249
4.44993
0.889733
def enable_lldp(self, port_name, is_ncb=True, is_nb=False):
    """Function to enable LLDP on the interface.

    :param port_name: interface on which to enable LLDP
    :param is_ncb: enable on the nearest-customer-bridge agent
    :param is_nb: enable on the nearest-bridge agent
    """
    bridges = []
    if is_ncb:
        bridges.append("ncb")
    if is_nb:
        bridges.append("nb")
    for bridge in bridges:
        self.run_lldptool(["-L", "-i", port_name, "-g", bridge,
                           "adminStatus=rxtx"])
3.478737
3.340048
1.041523
def enable_lldp(self):
    """Function to enable LLDP on the interface."""
    # Enable on the nearest-customer-bridge and/or nearest-bridge agent,
    # depending on how this instance was configured.
    for enabled, bridge in ((self.is_ncb, "ncb"), (self.is_nb, "nb")):
        if enabled:
            self.run_lldptool(["-L", "-i", self.port_name, "-g", bridge,
                               "adminStatus=rxtx"])
4.667096
4.355362
1.071575
def enable_evb(self):
    """Function to enable EVB on the interface.

    Returns the result of enabling GPID, or False when the port is not
    a nearest-customer-bridge port.
    """
    if not self.is_ncb:
        LOG.error("EVB cannot be set on NB")
        return False
    self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb", "-V",
                       "evb", "enableTx=yes"])
    return self.enable_gpid()
12.43461
10.860614
1.144927
def enable_gpid(self):
    """Function to enable Group ID on the interface.

    This is needed to use the MAC, GID, VID Filter.
    """
    if not self.is_ncb:
        LOG.error("GPID cannot be set on NB")
        return False
    self.run_lldptool(["-T", "-i", self.port_name, "-g", "ncb", "-V",
                       "evb", "-c", "evbgpid=yes"])
    return True
12.070039
11.195246
1.07814
def _vdp_refrsh_hndlr(self):
    """Periodic refresh of vNIC events to VDP.

    VDP daemon itself has keepalives. This is needed on top of it to
    keep Orchestrator like OpenStack, VDP daemon and the physical
    switch in sync.

    For every cached vNIC, re-sends a VDP associate and, if the switch
    answer (VLAN or failure reason) differs from the cached one, or the
    callback has been suppressed too long, invokes the app callback.
    """
    LOG.debug("Refresh handler")
    try:
        if not self.vdp_vif_map:
            LOG.debug("vdp_vif_map not created, returning")
            return
        # Work on shallow copies so the maps can be mutated by other
        # threads while we iterate; the authoritative check is done
        # again under mutex_lock below.
        vdp_vif_map = dict.copy(self.vdp_vif_map)
        oui_vif_map = dict.copy(self.oui_vif_map)
        for key in six.iterkeys(vdp_vif_map):
            lvdp_dict = vdp_vif_map.get(key)
            loui_dict = oui_vif_map.get(key)
            if not lvdp_dict:
                return
            if not loui_dict:
                oui_id = ""
                oui_data = ""
            else:
                oui_id = loui_dict.get('oui_id')
                oui_data = loui_dict.get('oui_data')
            with self.mutex_lock:
                # Re-check under the lock: the entry may have been
                # removed since the copy was taken.
                if key in self.vdp_vif_map:
                    LOG.debug("Sending Refresh for VSI %s", lvdp_dict)
                    vdp_vlan, fail_reason = self.send_vdp_assoc(
                        vsiid=lvdp_dict.get('vsiid'),
                        mgrid=lvdp_dict.get('mgrid'),
                        typeid=lvdp_dict.get('typeid'),
                        typeid_ver=lvdp_dict.get('typeid_ver'),
                        vsiid_frmt=lvdp_dict.get('vsiid_frmt'),
                        filter_frmt=lvdp_dict.get('filter_frmt'),
                        gid=lvdp_dict.get('gid'),
                        mac=lvdp_dict.get('mac'),
                        vlan=0, oui_id=oui_id, oui_data=oui_data,
                        sw_resp=True)
                    # check validity.
                    if not utils.is_valid_vlan_tag(vdp_vlan):
                        LOG.error("Returned vlan %(vlan)s is invalid.",
                                  {'vlan': vdp_vlan})
                        # Need to invoke CB. So no return here.
                        vdp_vlan = 0
                    exist_vdp_vlan = lvdp_dict.get('vdp_vlan')
                    exist_fail_reason = lvdp_dict.get('fail_reason')
                    callback_count = lvdp_dict.get('callback_count')
                    # Condition will be hit only during error cases when
                    # switch reloads or when compute reloads
                    if vdp_vlan != exist_vdp_vlan or (
                            fail_reason != exist_fail_reason or
                            callback_count > vdp_const.CALLBACK_THRESHOLD):
                        # Invoke the CB Function
                        cb_fn = lvdp_dict.get('vsw_cb_fn')
                        cb_data = lvdp_dict.get('vsw_cb_data')
                        if cb_fn:
                            cb_fn(cb_data, vdp_vlan, fail_reason)
                        lvdp_dict['vdp_vlan'] = vdp_vlan
                        lvdp_dict['fail_reason'] = fail_reason
                        lvdp_dict['callback_count'] = 0
                    else:
                        lvdp_dict['callback_count'] += 1
    except Exception as e:
        LOG.error("Exception in Refrsh %s", str(e))
2.849933
2.85118
0.999562
def run_lldptool(self, args):
    """Function for invoking the lldptool utility.

    :param args: list of command-line arguments appended to 'lldptool'
    """
    cmd = ['lldptool'] + args
    try:
        utils.execute(cmd, root_helper=self.root_helper)
    except Exception as e:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': cmd, 'exception': e})
2.582066
2.518688
1.025163
def store_oui(self, port_uuid, oui_type, oui_data):
    """Function for storing the OUI.

    param uuid: UUID of the vNIC
    param oui_type: OUI ID
    param oui_data: OUI Opaque Data
    """
    entry = {'oui_id': oui_type, 'oui_data': oui_data}
    self.oui_vif_map[port_uuid] = entry
3.41613
3.183757
1.072987
def store_vdp_vsi(self, port_uuid, mgrid, typeid, typeid_ver, vsiid_frmt,
                  vsiid, filter_frmt, gid, mac, vlan, new_network, reply,
                  oui_id, oui_data, vsw_cb_fn, vsw_cb_data, reason):
    """Stores the vNIC specific info for VDP Refresh.

    :param port_uuid: vNIC UUID (map key)
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param vsiid: VSI value
    :param filter_frmt: Filter Format
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param new_network: Is this the first vNIC of this network
    :param reply: Response from the switch
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :param vsw_cb_fn: Callback function from the app.
    :param vsw_cb_data: Callback data for the app.
    :param reason: Failure Reason
    """
    # NOTE(review): despite the "Not Storing" message, the entry is
    # unconditionally overwritten below -- the debug line only flags the
    # duplicate.  Confirm whether an early return was intended.
    if port_uuid in self.vdp_vif_map:
        LOG.debug("Not Storing VDP VSI MAC %(mac)s UUID %(uuid)s",
                  {'mac': mac, 'uuid': vsiid})
    # For a brand-new network the switch reply carries the VLAN;
    # otherwise the caller-provided VLAN is authoritative.
    if new_network:
        vdp_vlan = reply
    else:
        vdp_vlan = vlan
    vdp_dict = {'vdp_vlan': vdp_vlan, 'mgrid': mgrid, 'typeid': typeid,
                'typeid_ver': typeid_ver, 'vsiid_frmt': vsiid_frmt,
                'vsiid': vsiid, 'filter_frmt': filter_frmt, 'mac': mac,
                'gid': gid, 'vsw_cb_fn': vsw_cb_fn,
                'vsw_cb_data': vsw_cb_data, 'fail_reason': reason,
                'callback_count': 0}
    self.vdp_vif_map[port_uuid] = vdp_dict
    LOG.debug("Storing VDP VSI MAC %(mac)s UUID %(uuid)s VDP VLAN "
              "%(vlan)s",
              {'mac': mac, 'uuid': vsiid, 'vlan': vdp_vlan})
    if oui_id:
        self.store_oui(port_uuid, oui_id, oui_data)
1.876955
2.00789
0.93479
def clear_oui(self, port_uuid):
    """Clears the OUI specific info.

    :param port_uuid: vNIC UUID
    Currently only one OUI per VSI fixme(padkrish)
    """
    try:
        del self.oui_vif_map[port_uuid]
    except KeyError:
        LOG.debug("OUI does not exist")
3.056348
3.066146
0.996804
def clear_vdp_vsi(self, port_uuid):
    """Clears the vNIC specific info kept for VDP Refresh.

    :param port_uuid: vNIC UUID
    """
    try:
        entry = self.vdp_vif_map[port_uuid]
        LOG.debug("Clearing VDP VSI MAC %(mac)s UUID %(uuid)s",
                  {'mac': entry.get('mac'), 'uuid': entry.get('vsiid')})
        del self.vdp_vif_map[port_uuid]
    except Exception:
        LOG.error("VSI does not exist")
    self.clear_oui(port_uuid)
2.934058
3.21888
0.911515
def gen_cisco_vdp_oui(self, oui_id, oui_data):
    """Cisco specific handler for constructing OUI arguments.

    Builds one "oui=<id>,<key>=<value>" argument per known field that is
    present in oui_data (vm_name, ip_addr -> ipv4_addr, vm_uuid).
    """
    prefix = "oui=%s," % oui_id
    # (key in oui_data, keyword emitted in the argument)
    field_map = (('vm_name', 'vm_name'),
                 ('ip_addr', 'ipv4_addr'),
                 ('vm_uuid', 'vm_uuid'))
    oui_list = []
    for data_key, arg_key in field_map:
        value = oui_data.get(data_key)
        if value is not None:
            oui_list.append(prefix + arg_key + "=" + value)
    return oui_list
1.631428
1.594534
1.023138
def gen_oui_str(self, oui_list):
    """Generate the OUI string for vdptool.

    Each OUI entry is preceded by a '-c' flag.
    """
    args = []
    for oui in oui_list:
        args.extend(['-c', oui])
    return args
2.458477
2.222044
1.106403
def construct_vdp_dict(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
                       vsiid, filter_frmt, gid, mac, vlan, oui_id,
                       oui_data):
    """Constructs the VDP Message.

    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section
    for more detailed information

    :param mode: Associate or De-associate
    :param mgrid: MGR ID (None picks the configured default)
    :param typeid: Type ID (None picks the configured default)
    :param typeid_ver: Version of the Type ID (None picks the default)
    :param vsiid_frmt: Format of the following VSI argument
    :param vsiid: VSI value
    :param filter_frmt: Filter Format
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :return vdp_keyword_str: Dictionary of VDP arguments and values;
            empty dict on any validation failure
    """
    vdp_keyword_str = {}
    if mgrid is None:
        mgrid = self.vdp_opts.get('mgrid')
    mgrid_str = "mgrid2=%s" % mgrid
    if typeid is None:
        typeid = self.vdp_opts.get('typeid')
    typeid_str = "typeid=%s" % typeid
    if typeid_ver is None:
        typeid_ver = self.vdp_opts.get('typeidver')
    typeid_ver_str = "typeidver=%s" % typeid_ver
    if int(vsiid_frmt) == int(self.vdp_opts.get('vsiidfrmt')):
        vsiid_str = "uuid=%s" % vsiid
    else:
        # Only format supported for now
        LOG.error("Unsupported VSIID Format1")
        return vdp_keyword_str
    if vlan == constants.INVALID_VLAN:
        vlan = 0
    if int(filter_frmt) == vdp_const.VDP_FILTER_GIDMACVID:
        if not mac or gid == 0:
            LOG.error("Incorrect Filter Format Specified")
            return vdp_keyword_str
        else:
            f = "filter=%s-%s-%s"
            filter_str = f % (vlan, mac, gid)
    elif int(filter_frmt) == vdp_const.VDP_FILTER_GIDVID:
        if gid == 0:
            LOG.error("NULL GID Specified")
            return vdp_keyword_str
        else:
            filter_str = "filter=" + '%d' % vlan + "--" + '%ld' % gid
    elif int(filter_frmt) == vdp_const.VDP_FILTER_MACVID:
        if not mac:
            LOG.error("NULL MAC Specified")
            return vdp_keyword_str
        else:
            filter_str = "filter=" + '%d' % vlan + "-" + mac
    elif int(filter_frmt) == vdp_const.VDP_FILTER_VID:
        filter_str = "filter=" + '%d' % vlan
    else:
        LOG.error("Incorrect Filter Format Specified")
        return vdp_keyword_str
    oui_list = []
    if oui_id is not None and oui_data is not None:
        # Bug fix: was "oui_id is 'cisco'" -- identity comparison against
        # a string literal is implementation-dependent (and a
        # SyntaxWarning on modern Python); use equality.
        if oui_id == 'cisco':
            oui_list = self.gen_cisco_vdp_oui(oui_id, oui_data)
    mode_str = "mode=" + mode
    vdp_keyword_str = dict(mode=mode_str, mgrid=mgrid_str,
                           typeid=typeid_str, typeid_ver=typeid_ver_str,
                           vsiid=vsiid_str, filter=filter_str,
                           oui_list=oui_list)
    return vdp_keyword_str
2.122021
2.097991
1.011454
def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt,
                       vsiid, filter_frmt, gid, mac, vlan, oui_id,
                       oui_data):
    """Constructs and Sends the VDP Query Message.

    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section
    for more detailed information

    :param mode: Associate or De-associate
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param vsiid: VSI value
    :param filter_frmt: Filter Format
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :return reply: Reply from vdptool (None on error)
    """
    if not self.is_ncb:
        LOG.error("EVB cannot be set on NB")
        return
    vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
                                          typeid_ver, vsiid_frmt, vsiid,
                                          filter_frmt, gid, mac, vlan,
                                          None, None)
    if not vdp_key_str:
        LOG.error("NULL List")
        return
    query_args = ["-t", "-i", self.port_name, "-R", "-V", mode]
    for keyword in ('mode', 'mgrid', 'typeid', 'typeid_ver', 'vsiid'):
        query_args.extend(["-c", vdp_key_str[keyword]])
    return self.run_vdptool(query_args)
3.574146
3.502198
1.020543
def send_vdp_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid,
                 filter_frmt, gid, mac, vlan, oui_id, oui_data, sw_resp):
    """Constructs and Sends the VDP Message.

    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section
    for more detailed information

    :param mode: Associate or De-associate
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param vsiid: VSI value
    :param filter_frmt: Filter Format
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :param sw_resp: Flag indicating if response is required from the daemon
    :return reply: Reply from vdptool (None on error)
    """
    if not self.is_ncb:
        LOG.error("EVB cannot be set on NB")
        return
    vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid,
                                          typeid_ver, vsiid_frmt, vsiid,
                                          filter_frmt, gid, mac, vlan,
                                          oui_id, oui_data)
    if not vdp_key_str:
        LOG.error("NULL List")
        return
    oui_cmd_str = self.gen_oui_str(vdp_key_str['oui_list'])
    vdp_args = ["-T", "-i", self.port_name]
    if sw_resp:
        # If filter is not VID and if VLAN is 0, Query for the TLV first,
        # if found VDP will return the VLAN. Add support for this once
        # vdptool has the support for querying exact VSI filters
        # fixme(padkrish)
        vdp_args.append("-W")
    vdp_args.extend(["-V", mode,
                     "-c", vdp_key_str['mode'],
                     "-c", vdp_key_str['mgrid'],
                     "-c", vdp_key_str['typeid'],
                     "-c", vdp_key_str['typeid_ver'],
                     "-c", vdp_key_str['vsiid'],
                     "-c", "hints=none",
                     "-c", vdp_key_str['filter']])
    return self.run_vdptool(vdp_args, oui_args=oui_cmd_str)
3.246522
3.104344
1.0458
def crosscheck_query_vsiid_mac(self, reply, vsiid, mac):
    """Cross Check the reply against the input vsiid,mac for get query.

    :param reply: raw text reply from vdptool
    :param vsiid: expected VSI UUID
    :param mac: expected MAC address
    :return: (True, None) on match, (False, fail_reason) otherwise
    """
    # Text after "uuid": first whitespace-separated token, with the
    # leading 4 characters stripped -- presumably a 4-char length prefix
    # emitted by vdptool; TODO confirm against vdptool output format.
    vsiid_reply = reply.partition("uuid")[2].split()[0][4:]
    if vsiid != vsiid_reply:
        fail_reason = vdp_const.vsi_mismatch_failure_reason % (
            vsiid, vsiid_reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    # The filter value is "<vlan>-<mac>-<gid>"; field [1] is the MAC.
    mac_reply = reply.partition("filter")[2].split('-')[1]
    if mac != mac_reply:
        fail_reason = vdp_const.mac_mismatch_failure_reason % (
            mac, mac_reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    return True, None
2.8332
2.870041
0.987164
def get_vdp_failure_reason(self, reply):
    """Parse the failure reason from VDP.

    Extracts the second-to-last line preceding the "filter" keyword in
    the reply; falls back to a generic retrieve-failure message when
    nothing usable is found or parsing raises.
    """
    try:
        fail_reason = reply.partition(
            "filter")[0].replace('\t', '').split('\n')[-2]
        if len(fail_reason) == 0:
            fail_reason = vdp_const.retrieve_failure_reason % (reply)
    except Exception:
        fail_reason = vdp_const.retrieve_failure_reason % (reply)
    return fail_reason
4.569385
4.190115
1.090515
def check_filter_validity(self, reply, filter_str):
    """Check for the validity of the filter.

    :param reply: raw text reply from vdptool
    :param filter_str: keyword to look for (e.g. "filter" or "filter = ")
    :return: (True, None) when exactly one occurrence is present,
             (False, fail_reason) otherwise
    """
    try:
        # index/rindex raise when the keyword is absent.
        f_ind = reply.index(filter_str)
        l_ind = reply.rindex(filter_str)
    except Exception:
        fail_reason = vdp_const.filter_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    if f_ind != l_ind:
        # Currently not supported if reply contains a filter keyword
        fail_reason = vdp_const.multiple_filter_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    return True, None
3.660053
3.615225
1.0124
def get_vlan_from_associate_reply(self, reply, vsiid, mac):
    """Parse the associate reply from VDP daemon to get the VLAN value.

    :param reply: raw text reply from vdptool
    :param vsiid: expected VSI UUID
    :param mac: expected MAC address
    :return: (vlan, None) on success,
             (constants.INVALID_VLAN, fail_reason) on any failure
    """
    try:
        # NOTE(review): crosscheck_reply_vsiid_mac is not visible in this
        # module chunk -- confirm it exists (cf. the similarly named
        # crosscheck_query_vsiid_mac used for query replies).
        verify_flag, fail_reason = self.crosscheck_reply_vsiid_mac(
            reply, vsiid, mac)
        if not verify_flag:
            return constants.INVALID_VLAN, fail_reason
        mode_str = reply.partition("mode = ")[2].split()[0]
        if mode_str != "assoc":
            fail_reason = self.get_vdp_failure_reason(reply)
            return constants.INVALID_VLAN, fail_reason
    except Exception:
        fail_reason = vdp_const.mode_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return constants.INVALID_VLAN, fail_reason
    check_filter, fail_reason = self.check_filter_validity(
        reply, "filter = ")
    if not check_filter:
        return constants.INVALID_VLAN, fail_reason
    try:
        # Filter value is "<vlan>-<mac>-<gid>"; the leading field is
        # the VLAN.
        vlan_val = reply.partition("filter = ")[2].split('-')[0]
        vlan = int(vlan_val)
    except ValueError:
        fail_reason = vdp_const.format_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return constants.INVALID_VLAN, fail_reason
    return vlan, None
3.115593
2.941507
1.059183
def check_hints(self, reply):
    """Parse the hints to check for errors.

    :param reply: raw text reply from vdptool
    :return: (True, None) when exactly one "hints" keyword is present
             and its value is 0, (False, fail_reason) otherwise
    """
    try:
        f_ind = reply.index("hints")
        l_ind = reply.rindex("hints")
    except Exception:
        fail_reason = vdp_const.hints_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    if f_ind != l_ind:
        # Currently not supported if reply contains a filter keyword
        fail_reason = vdp_const.multiple_hints_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    try:
        # After "hints" comes a 4-char decimal length field, then
        # that many characters of hint value -- presumably the vdptool
        # TLV wire format; TODO confirm.
        hints_compl = reply.partition("hints")[2]
        hints_val = reply.partition("hints")[2][0:4]
        len_hints = int(hints_val)
        hints_val = hints_compl[4:4 + len_hints]
        hints = int(hints_val)
        if hints != 0:
            fail_reason = vdp_const.nonzero_hints_failure % (hints)
            return False, fail_reason
    except ValueError:
        fail_reason = vdp_const.format_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return False, fail_reason
    return True, None
3.238295
3.172472
1.020748
def get_vlan_from_query_reply(self, reply, vsiid, mac):
    """Parse the query reply from VDP daemon to get the VLAN value.

    :param reply: raw text reply from vdptool
    :param vsiid: expected VSI UUID
    :param mac: expected MAC address
    :return: (vlan, None) on success,
             (constants.INVALID_VLAN, fail_reason) on any failure
    """
    hints_ret, fail_reason = self.check_hints(reply)
    if not hints_ret:
        LOG.error("Incorrect hints found %s", reply)
        return constants.INVALID_VLAN, fail_reason
    check_filter, fail_reason = self.check_filter_validity(reply, "filter")
    if not check_filter:
        return constants.INVALID_VLAN, fail_reason
    try:
        verify_flag, fail_reason = self.crosscheck_query_vsiid_mac(
            reply, vsiid, mac)
        if not verify_flag:
            return constants.INVALID_VLAN, fail_reason
        # Skip the 4-char length prefix after "filter", then the VLAN is
        # the first '-'-separated field -- presumably vdptool's TLV
        # format; TODO confirm.
        filter_val = reply.partition("filter")[2]
        len_fil = len(filter_val)
        vlan_val = filter_val[4:len_fil].split('-')[0]
        vlan = int(vlan_val)
    except ValueError:
        fail_reason = vdp_const.format_failure_reason % (reply)
        LOG.error("%s", fail_reason)
        return constants.INVALID_VLAN, fail_reason
    return vlan, None
4.076418
3.82516
1.065686
def send_vdp_assoc(self, vsiid=None, mgrid=None, typeid=None,
                   typeid_ver=None,
                   vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                   filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0,
                   mac="", vlan=0, oui_id="", oui_data="",
                   sw_resp=False):
    """Sends the VDP Associate Message.

    Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section
    for more detailed information

    :param vsiid: VSI value, Only UUID supported for now
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for
                        now
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui_id: OUI Type
    :param oui_data: OUI Data
    :param sw_resp: Flag indicating if response is required from the
                    daemon
    :return: (vlan, fail_reason); vlan is the value returned by vdptool
             (which in turn is given by the switch), or None when no
             response was requested
    """
    # When a response is wanted and the full GID/MAC/VID filter is used,
    # query the daemon first -- an existing association already carries
    # the VLAN and saves a re-associate round trip.
    if sw_resp and filter_frmt == vdp_const.VDP_FILTER_GIDMACVID:
        reply = self.send_vdp_query_msg("assoc", mgrid, typeid,
                                        typeid_ver, vsiid_frmt, vsiid,
                                        filter_frmt, gid, mac, vlan,
                                        oui_id, oui_data)
        vlan_resp, fail_reason = self.get_vlan_from_query_reply(
            reply, vsiid, mac)
        if vlan_resp != constants.INVALID_VLAN:
            return vlan_resp, fail_reason
    reply = self.send_vdp_msg("assoc", mgrid, typeid, typeid_ver,
                              vsiid_frmt, vsiid, filter_frmt, gid, mac,
                              vlan, oui_id, oui_data, sw_resp)
    if sw_resp:
        vlan, fail_reason = self.get_vlan_from_associate_reply(
            reply, vsiid, mac)
        return vlan, fail_reason
    return None, None
2.031129
2.119042
0.958513
def send_vdp_vnic_up(self, port_uuid=None, vsiid=None, mgrid=None,
                     typeid=None, typeid_ver=None,
                     vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                     filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0,
                     mac="", vlan=0, oui=None, new_network=False,
                     vsw_cb_fn=None, vsw_cb_data=None):
    """Interface function to apps, called for a vNIC UP.

    This currently sends an VDP associate message. Please refer
    http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more
    detailed information

    :param port_uuid: uuid of the vNIC
    :param vsiid: VSI value, Only UUID supported for now
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported now
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui: optional dict with OUI type ('oui_id') and opaque data
    :param new_network: Is this the first vNIC of this network
    :param vsw_cb_fn: Callback function from the app.
    :param vsw_cb_data: Callback data for the app.
    :return: (reply, fail_reason) from the associate
    """
    if oui is None:
        oui = {}
    if 'oui_id' in oui:
        oui_id = oui['oui_id']
        oui_data = oui
    else:
        oui_id = None
        oui_data = None
    reply, fail_reason = self.send_vdp_assoc(
        vsiid=vsiid, mgrid=mgrid, typeid=typeid, typeid_ver=typeid_ver,
        vsiid_frmt=vsiid_frmt, filter_frmt=filter_frmt, gid=gid, mac=mac,
        vlan=vlan, oui_id=oui_id, oui_data=oui_data, sw_resp=new_network)
    # Cache everything needed for the periodic VDP refresh.
    self.store_vdp_vsi(port_uuid, mgrid, typeid, typeid_ver, vsiid_frmt,
                       vsiid, filter_frmt, gid, mac, vlan, new_network,
                       reply, oui_id, oui_data, vsw_cb_fn, vsw_cb_data,
                       fail_reason)
    return reply, fail_reason
1.971574
1.807971
1.09049
def send_vdp_vnic_down(self, port_uuid=None, vsiid=None, mgrid=None,
                       typeid=None, typeid_ver=None,
                       vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID,
                       filter_frmt=vdp_const.VDP_FILTER_GIDMACVID,
                       gid=0, mac="", vlan=0, oui=""):
    """Interface function to apps, called for a vNIC DOWN.

    This currently sends an VDP dis-associate message. Please refer
    http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more
    detailed information

    :param port_uuid: uuid of the vNIC
    :param vsiid: VSI value, Only UUID supported for now
    :param mgrid: MGR ID
    :param typeid: Type ID
    :param typeid_ver: Version of the Type ID
    :param vsiid_frmt: Format of the following VSI argument
    :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported now
    :param gid: Group ID the vNIC belongs to
    :param mac: MAC Address of the vNIC
    :param vlan: VLAN of the vNIC
    :param oui: OUI data (unused by the deassociate itself)
    """
    # Correct non-zero VLAN needs to be specified
    try:
        with self.mutex_lock:
            self.send_vdp_deassoc(
                vsiid=vsiid, mgrid=mgrid, typeid=typeid,
                typeid_ver=typeid_ver, vsiid_frmt=vsiid_frmt,
                filter_frmt=filter_frmt, gid=gid, mac=mac, vlan=vlan)
            self.clear_vdp_vsi(port_uuid)
    except Exception as exc:
        LOG.error("VNIC Down exception %s", exc)
3.133425
3.25154
0.963674
def run_vdptool(self, args, oui_args=None):
    """Function that runs the vdptool utility.

    :param args: base command-line arguments
    :param oui_args: optional extra OUI arguments appended at the end
    :return: the tool's output, or None when execution fails
    """
    extra = [] if oui_args is None else oui_args
    cmd = ['vdptool'] + args + extra
    try:
        return utils.execute(cmd, root_helper=self.root_helper)
    except Exception as exc:
        LOG.error("Unable to execute %(cmd)s. "
                  "Exception: %(exception)s",
                  {'cmd': cmd, 'exception': exc})
2.187036
2.145356
1.019428
def get_routers(self, context, router_ids=None, hd_ids=None):
    """Make a remote process call to retrieve the sync data for routers.

    :param context: session context
    :param router_ids: list of routers to fetch
    :param hd_ids: hosting device ids, only routers assigned to these
                   hosting devices will be returned.
    """
    return self.client.prepare(version='1.1').call(
        context, 'cfg_sync_routers', host=self.host,
        router_ids=router_ids, hosting_device_ids=hd_ids)
3.439691
3.339212
1.030091
def update_floatingip_statuses(self, context, router_id, fip_statuses):
    """Make a remote process call to update operational status for one or
    several floating IPs.

    :param context: contains user information
    :param router_id: id of router associated with the floatingips
    :param fip_statuses: dict with floatingip_id as key and status as
                         value
    """
    return self.client.prepare(version='1.1').call(
        context, 'update_floatingip_statuses_cfg', router_id=router_id,
        fip_statuses=fip_statuses)
2.6221
3.035866
0.863708
def send_update_port_statuses(self, context, port_ids, status):
    """Call the plugin to update the port status which updates the DB.

    :param context: contains user information
    :param port_ids: list of ids of the ports associated with the status
    :param status: value of the status for the given port list (port_ids)
    """
    return self.client.prepare(version='1.1').call(
        context, 'update_port_statuses_cfg', port_ids=port_ids,
        status=status)
2.92451
3.328707
0.878573
def router_deleted(self, context, routers):
    """Deal with router deletion RPC message.

    :param context: RPC context (unused here)
    :param routers: payload identifying the deleted routers; cached so
                    the processing loop can tear them down later
    """
    LOG.debug('Got router deleted notification for %s', routers)
    self._update_removed_routers_cache(routers)
6.294071
5.387925
1.168181
def routers_updated(self, context, routers):
    """Deal with routers modification and creation RPC message.

    :param context: RPC context (unused here)
    :param routers: list of router ids, or (legacy payload) list of
                    router dicts
    """
    LOG.debug('Got routers updated notification :%s', routers)
    if not routers:
        return
    # This is needed for backward compatibility: older servers send
    # full router dicts instead of ids.
    if isinstance(routers[0], dict):
        routers = [r['id'] for r in routers]
    self._update_updated_routers_cache(routers)
3.826076
3.631097
1.053697
def collect_state(self, configurations):
    """Collect state from this helper.

    A set of attributes which summarizes the state of the routers and
    configurations managed by this config agent.

    :param configurations: dict of configuration values
    :return dict of updated configuration values
    """
    router_infos = self.router_info.values()
    num_routers = len(router_infos)
    num_ex_gw_ports = 0
    num_interfaces = 0
    num_floating_ips = 0
    num_hd_routers = collections.defaultdict(int)
    for ri in router_infos:
        router = ri.router
        if router.get('gw_port'):
            num_ex_gw_ports += 1
        num_interfaces += len(router.get(bc.constants.INTERFACE_KEY, []))
        num_floating_ips += len(router.get(bc.constants.FLOATINGIP_KEY,
                                           []))
        hd = router['hosting_device']
        if hd:
            num_hd_routers[hd['id']] += 1
    routers_per_hd = {hd_id: {'routers': num}
                      for hd_id, num in num_hd_routers.items()}
    non_responding = self._dev_status.get_backlogged_hosting_devices()
    configurations['total routers'] = num_routers
    configurations['total ex_gw_ports'] = num_ex_gw_ports
    configurations['total interfaces'] = num_interfaces
    configurations['total floating_ips'] = num_floating_ips
    configurations['hosting_devices'] = routers_per_hd
    configurations['non_responding_hosting_devices'] = non_responding
    return configurations
2.588926
2.433843
1.063719
def _fetch_router_info(self, router_ids=None, device_ids=None,
                       all_routers=False):
    """Fetch router dict from the routing plugin.

    :param router_ids: List of router_ids of routers to fetch
    :param device_ids: List of device_ids whose routers to fetch
    :param all_routers: If True fetch all the routers for this agent.
    :return: List of router dicts of format:
             [ {router_dict1}, {router_dict2},.....]
    """
    # Bug fix: 'routers' was previously unbound when none of the
    # selectors matched (all arguments left at their defaults), raising
    # UnboundLocalError at the final return.  Default to an empty list.
    routers = []
    try:
        if all_routers:
            LOG.debug('Fetching all routers')
            router_ids = self.plugin_rpc.get_router_ids(self.context)
            routers = self._fetch_router_chunk_data(router_ids)
        elif router_ids:
            routers = self._fetch_router_chunk_data(router_ids)
        elif device_ids:
            # Device-scoped fetches go out in one call; the chunk-size
            # adjustment below intentionally does not apply to them.
            return self.plugin_rpc.get_routers(self.context,
                                               hd_ids=device_ids)
    except oslo_messaging.MessagingTimeout:
        # Server too slow: shrink the chunk size (never below the
        # minimum) and let the caller retry.
        if self.sync_routers_chunk_size > SYNC_ROUTERS_MIN_CHUNK_SIZE:
            self.sync_routers_chunk_size = max(
                int(round(self.sync_routers_chunk_size / 2)),
                SYNC_ROUTERS_MIN_CHUNK_SIZE)
            LOG.warning('Server failed to return info for routers in '
                        'required time, decreasing chunk size to: %s',
                        self.sync_routers_chunk_size)
        else:
            LOG.warning('Server failed to return info for routers in '
                        'required time even with min chunk size: %s. '
                        'It might be under very high load or just '
                        'inoperable',
                        self.sync_routers_chunk_size)
        raise
    except oslo_messaging.MessagingException:
        LOG.exception("RPC Error in fetching routers from plugin")
        self.fullsync = True
        raise n_exc.AbortSyncRouters()

    LOG.debug("Periodic_sync_routers_task successfully completed")
    # adjust chunk size after successful sync: grow back towards the
    # configured maximum.
    if (self.sync_routers_chunk_size <
            cfg.CONF.cfg_agent.max_device_sync_batch_size):
        self.sync_routers_chunk_size = min(
            self.sync_routers_chunk_size + SYNC_ROUTERS_MIN_CHUNK_SIZE,
            cfg.CONF.cfg_agent.max_device_sync_batch_size)
    return routers
3.126449
3.154376
0.991147
def _fetch_router_chunk_data(self, router_ids=None):
    """Fetch router data from the routing plugin in chunks.

    :param router_ids: List of router_ids of routers to fetch
    :return: List of router dicts of format:
             [ {router_dict1}, {router_dict2},.....]
    """
    # Robustness fix: the documented default of None previously crashed
    # at len(None); treat it as "no routers requested".
    if router_ids is None:
        router_ids = []
    if len(router_ids) <= self.sync_routers_chunk_size:
        return self.plugin_rpc.get_routers(self.context,
                                           router_ids=router_ids)
    # fetch routers by chunks to reduce the load on server and
    # to start router processing earlier
    curr_router = []
    for i in range(0, len(router_ids), self.sync_routers_chunk_size):
        routers = self.plugin_rpc.get_routers(
            self.context,
            (router_ids[i:i + self.sync_routers_chunk_size]))
        LOG.debug('Processing :%r', routers)
        curr_router.extend(routers)
    return curr_router
3.434061
3.315341
1.035809
def _handle_sync_devices(self, routers):
    """Handles routers during a device_sync.

    This method performs post-processing on routers fetched from the
    routing plugin during a device sync. Routers are first fetched from
    the plugin based on the list of device_ids. Since fetched routers
    take precedence over pending work, matching router-ids buffered in
    update_routers and removed_routers are discarded. The existing
    router cache is also cleared in order to properly trigger updates
    and deletes. Lastly, invalid configuration in the underlying
    hosting-device is deleted via _cleanup_invalid_cfg.

    Modifies updated_routers, removed_routers, and sync_devices
    attributes

    :param routers: working list of routers as populated in
                    process_services
    """
    sync_devices_list = list(self.sync_devices)
    LOG.debug("Fetching routers on:%s", sync_devices_list)
    fetched_routers = self._fetch_router_info(device_ids=sync_devices_list)
    if fetched_routers:
        LOG.debug("[sync_devices] Fetched routers :%s",
                  pp.pformat(fetched_routers))
        # clear router_config cache
        for router_dict in fetched_routers:
            self._del_from_updated_routers_cache(router_dict['id'])
            self._del_from_removed_routers_cache(router_dict['id'])
            LOG.debug("[sync_devices] invoking "
                      "_router_removed(%s)", router_dict['id'])
            # deconfigure=False: only drop local state, don't touch the
            # hosting device; the subsequent processing re-applies config.
            self._router_removed(router_dict['id'], deconfigure=False)
        self._cleanup_invalid_cfg(fetched_routers)
        routers.extend(fetched_routers)
        self.sync_devices.clear()
        LOG.debug("[sync_devices] %s finished", sync_devices_list)
    else:
        # If the initial attempt to sync a device
        # failed, retry again (by not clearing sync_devices)
        # Normal updated_routers processing is still allowed
        # to happen
        self.sync_devices_attempts += 1
        if (self.sync_devices_attempts >=
                cfg.CONF.cfg_agent.max_device_sync_attempts):
            # Give up after the configured number of attempts and reset
            # the retry state.
            LOG.debug("Max number [%d / %d ] of sync_devices "
                      "attempted. No further retries will "
                      "be attempted." %
                      (self.sync_devices_attempts,
                       cfg.CONF.cfg_agent.max_device_sync_attempts))
            self.sync_devices.clear()
            self.sync_devices_attempts = 0
        else:
            LOG.debug("Fetched routers was blank for sync attempt "
                      "[%d / %d], will attempt resync of %s devices "
                      "again in the next iteration" %
                      (self.sync_devices_attempts,
                       cfg.CONF.cfg_agent.max_device_sync_attempts,
                       pp.pformat(self.sync_devices)))
3.672112
3.421968
1.073099
removed_router_ids = [] for hd_id, resources in removed_devices_info['hosting_data'].items(): removed_router_ids += resources.get('routers', []) return removed_router_ids
def _get_router_ids_from_removed_devices_info(removed_devices_info)
Extract router_ids from the removed devices info dict. :param removed_devices_info: Dict of removed devices and their associated resources. Format: { 'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]}, 'hd_id2': {'routers': [id3, id4, ...]}, ... }, 'deconfigure': True/False } :return removed_router_ids: List of removed router ids
4.568629
2.680033
1.704691
hosting_devices = {} for key in resources.keys(): for r in resources.get(key) or []: if r.get('hosting_device') is None: continue hd_id = r['hosting_device']['id'] hosting_devices.setdefault(hd_id, {}) hosting_devices[hd_id].setdefault(key, []).append(r) return hosting_devices
def _sort_resources_per_hosting_device(resources)
This function will sort the resources on hosting device. The sorting on hosting device is done by looking up the `hosting_device` attribute of the resource, and its `id`. :param resources: a dict with key of resource name :return dict sorted on the hosting device of input resource. Format: hosting_devices = { 'hd_id1' : {'routers':[routers], 'removed_routers':[routers], .... } 'hd_id2' : {'routers':[routers], .. } ....... }
2.415951
2.182603
1.106913
def _adjust_router_list_for_global_router(self, routers):
    """Push 'Global' routers to the end of the router list.

    Ensures that deleting the default route occurs before deletion of
    the external network sub-interface.

    :param routers: list of router dicts, reordered in place
    """
    # Iterate over a snapshot: removing elements from the list being
    # iterated would silently skip entries (e.g. two consecutive
    # global routers).
    for r in list(routers):
        if r[ROUTER_ROLE_ATTR] == c_constants.ROUTER_ROLE_GLOBAL:
            LOG.debug("Global router:%s found. Moved to the end of list "
                      "for processing", r['id'])
            routers.remove(r)
            routers.append(r)
Pushes 'Global' routers to the end of the router list, so that deleting default route occurs before deletion of external nw subintf
5.257166
5.236624
1.003923
if not port_ids: return MAX_PORTS_IN_BATCH = 50 list_chunks_ports = [port_ids[i:i + MAX_PORTS_IN_BATCH] for i in six.moves.range(0, len(port_ids), MAX_PORTS_IN_BATCH)] for chunk_ports in list_chunks_ports: self.plugin_rpc.send_update_port_statuses(self.context, chunk_ports, status)
def _send_update_port_statuses(self, port_ids, status)
Sends update notifications to set the operational status of the list of router ports provided. To make each notification doesn't exceed the RPC length, each message contains a maximum of MAX_PORTS_IN_BATCH port ids. :param port_ids: List of ports to update the status :param status: operational status to update (ex: bc.constants.PORT_STATUS_ACTIVE)
2.504256
2.36274
1.059895
def _process_router(self, ri):
    """Process a router: apply latest configuration, update router_info.

    Detects changes between ri.router (latest state) and the previously
    known state.  New/deleted internal ports and gateway set/cleared
    transitions are dispatched to the role-appropriate handlers, then
    floating IPs and routes are processed.  The latest state is stored
    in ri.internal_ports and ri.ex_gw_port for future comparisons.

    :param ri: RouterInfo object of the router being processed
    :return: None
    :raises: cfg_exceptions.DriverException if configuration fails
    """
    try:
        ex_gw_port = ri.router.get('gw_port')
        ri.ha_info = ri.router.get('ha_info', None)
        gateway_set = ex_gw_port and not ri.ex_gw_port
        gateway_cleared = not ex_gw_port and ri.ex_gw_port
        internal_ports = ri.router.get(bc.constants.INTERFACE_KEY, [])
        # Once the gateway is set, then we know which VRF this router
        # belongs to.  Keep track of it in our lists of routers,
        # organized as a dictionary by VRF name.
        if gateway_set:
            self._add_rid_to_vrf_list(ri)
        new_ports, old_ports, change_details = (
            self._get_internal_port_changes(ri, internal_ports))
        list_port_ids_up = []
        non_global_router_roles = [None,
                                   c_constants.ROUTER_ROLE_HA_REDUNDANCY]
        if ri.router[ROUTER_ROLE_ATTR] in non_global_router_roles:
            self._process_new_ports(ri, new_ports, ex_gw_port,
                                    list_port_ids_up, change_details)
            self._process_old_ports(ri, old_ports, ex_gw_port,
                                    change_details)
        else:
            # global / logical-global routers use dedicated handlers
            self._process_new_ports_global(ri, new_ports, ex_gw_port,
                                           list_port_ids_up)
            self._process_old_ports_global(ri, old_ports, ex_gw_port)
        if gateway_set:
            self._process_gateway_set(ri, ex_gw_port, list_port_ids_up)
        elif gateway_cleared:
            self._process_gateway_cleared(ri, ri.ex_gw_port)
        self._send_update_port_statuses(list_port_ids_up,
                                        bc.constants.PORT_STATUS_ACTIVE)
        if ex_gw_port:
            self._process_router_floating_ips(ri, ex_gw_port)
        global_router_roles = [c_constants.ROUTER_ROLE_GLOBAL,
                               c_constants.ROUTER_ROLE_LOGICAL_GLOBAL]
        if ri.router[ROUTER_ROLE_ATTR] not in global_router_roles:
            self._enable_disable_ports(ri, ex_gw_port, internal_ports)
        if gateway_cleared:
            # Remove this router from the list of routers by VRF
            self._remove_rid_from_vrf_list(ri)
        ri.ex_gw_port = ex_gw_port
        self._routes_updated(ri)
    except cfg_exceptions.HAParamsMissingException as e:
        # re-queue so the router is retried once HA params are available
        self._update_updated_routers_cache([ri.router_id])
        LOG.warning(e)
    except cfg_exceptions.DriverException as e:
        with excutils.save_and_reraise_exception():
            self._update_updated_routers_cache([ri.router_id])
            LOG.error(e)
Process a router, apply latest configuration and update router_info. Get the router dict from RouterInfo and proceed to detect changes from the last known state. When new ports or deleted ports are detected, `internal_network_added()` or `internal_networks_removed()` are called accordingly. Similarly changes in ex_gw_port causes `external_gateway_added()` or `external_gateway_removed()` calls. Next, floating_ips and routes are processed. Also, latest state is stored in ri.internal_ports and ri.ex_gw_port for future comparisons. :param ri : RouterInfo object of the router being processed. :return:None :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions. DriverException if the configuration operation fails.
3.001156
2.883728
1.040721
def _router_added(self, router_id, router):
    """Operations when a router is added.

    Create a new RouterInfo object for this router, add it to the
    service helper's router_info dictionary and invoke the device
    driver's ``router_added()`` (except for global roles, which need no
    VRF on the device).

    :param router_id: id of the router
    :param router: router dict
    :return: None
    """
    ri = RouterInfo(router_id, router)
    driver = self.driver_manager.set_driver(router)
    if router[ROUTER_ROLE_ATTR] in [
            c_constants.ROUTER_ROLE_GLOBAL,
            c_constants.ROUTER_ROLE_LOGICAL_GLOBAL]:
        # No need to create a vrf for Global or logical global routers
        LOG.debug("Skipping router_added device processing for %(id)s as "
                  "its role is %(role)s",
                  {'id': router_id, 'role': router[ROUTER_ROLE_ATTR]})
    else:
        driver.router_added(ri)
    self.router_info[router_id] = ri
Operations when a router is added. Create a new RouterInfo object for this router and add it to the service helpers router_info dictionary. Then `router_added()` is called on the device driver. :param router_id: id of the router :param router: router dict :return: None
3.868784
3.792079
1.020228
def _router_removed(self, router_id, deconfigure=True):
    """Operations when a router is removed.

    Look up the RouterInfo for the router.  If ``deconfigure`` is True
    and the hosting device is reachable, the router's configuration is
    removed from the hosting device; on failure the router is re-queued
    in the removed_routers cache for a later retry.

    :param router_id: id of the router
    :param deconfigure: if True, delete the router's configuration from
        the hosting device
    :return: None
    """
    ri = self.router_info.get(router_id)
    if ri is None:
        LOG.warning("Info for router %s was not found. "
                    "Skipping router removal.", router_id)
        return
    ri.router['gw_port'] = None
    ri.router[bc.constants.INTERFACE_KEY] = []
    ri.router[bc.constants.FLOATINGIP_KEY] = []
    try:
        hd = ri.router['hosting_device']
        # We proceed to removing the configuration from the device
        # only if (a) deconfigure is set to True (default)
        # (b) the router's hosting device is reachable.
        if (deconfigure and
                self._dev_status.is_hosting_device_reachable(hd)):
            self._process_router(ri)
            driver = self.driver_manager.get_driver(router_id)
            driver.router_removed(ri)
            self.driver_manager.remove_driver(router_id)
        del self.router_info[router_id]
        self._del_from_removed_routers_cache(router_id)
    except cfg_exceptions.DriverException:
        LOG.warning("Router remove for router_id: %s was incomplete. "
                    "Adding the router to removed_routers list",
                    router_id)
        self._update_removed_routers_cache([router_id])
        # remove this router from updated_routers if it is there. It
        # might end up there too if exception was thrown earlier inside
        # `_process_router()`
        self._del_from_updated_routers_cache(router_id)
    except ncc_errors.SessionCloseError as e:
        LOG.exception("ncclient Unexpected session close %s"
                      " while attempting to remove router", e)
        if not self._dev_status.is_hosting_device_reachable(hd):
            LOG.debug("Lost connectivity to Hosting Device %s", hd['id'])
            # rely on heartbeat to detect HD state
            # and schedule resync when the device comes back
        else:
            # retry the router removal on the next pass
            self._update_removed_routers_cache([router_id])
            # NOTE: original code applied '%' to a single argument for a
            # two-placeholder format string, raising TypeError at log
            # time; pass both values lazily instead.
            LOG.debug("Interim connectivity lost to hosting device %s, "
                      "enqueuing router %s in removed_routers set",
                      pp.pformat(hd), router_id)
Operations when a router is removed. Get the RouterInfo object corresponding to the router in the service helpers's router_info dict. If deconfigure is set to True, remove this router's configuration from the hosting device. :param router_id: id of the router :param deconfigure: if True, the router's configuration is deleted from the hosting device. :return: None
4.668756
4.524796
1.031816
def _routes_updated(self, ri):
    """Update the state of routes in the router.

    Compare the current routes with the previously configured routes,
    detect what was added or removed, and configure the logical router
    in the hosting device accordingly.  A removed route whose
    destination matches an added one is treated as a replacement.

    :param ri: RouterInfo corresponding to the router
    :return: None
    :raises: cfg_exceptions.DriverException if configuration fails
    """
    new_routes = ri.router['routes']
    old_routes = ri.routes
    adds, removes = bc.common_utils.diff_list_of_dict(old_routes,
                                                      new_routes)
    for route in adds:
        LOG.debug("Added route entry is '%s'", route)
        # Remove replaced routes from the delete list.  Iterate over a
        # snapshot: calling remove() on the list being iterated would
        # skip entries.
        for del_route in list(removes):
            if route['destination'] == del_route['destination']:
                removes.remove(del_route)
        driver = self.driver_manager.get_driver(ri.id)
        driver.routes_updated(ri, 'replace', route)
    for route in removes:
        LOG.debug("Removed route entry is '%s'", route)
        driver = self.driver_manager.get_driver(ri.id)
        driver.routes_updated(ri, 'delete', route)
    ri.routes = new_routes
Update the state of routes in the router. Compares the current routes with the (configured) existing routes and detect what was removed or added. Then configure the logical router in the hosting device accordingly. :param ri: RouterInfo corresponding to the router. :return: None :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions. DriverException if the configuration operation fails.
3.034444
2.858589
1.061518
def get_running_config(self, conn):
    """Get the ASR1k's current running config.

    :param conn: active NETCONF connection (ncclient manager)
    :return: current IOS running config as a list of lines, or None if
        no config could be fetched
    """
    config = conn.get_config(source="running")
    if config:
        root = ET.fromstring(config._raw)
        running_config = root[0][0]
        # split on newlines, tolerating CRLF line endings
        rgx = re.compile(r"\r*\n+")
        ioscfg = rgx.split(running_config.text)
        return ioscfg
Get the ASR1k's current running config. :return: Current IOS running config as multiline string
4.818781
4.817274
1.000313
def subintf_real_ip_check_gw_port(self, gw_port, ip_addr, netmask):
    """Check running-cfg derived ip_addr/netmask against neutron gw_port.

    :param gw_port: neutron gateway port dict (or None)
    :param ip_addr: IP address parsed from the running config
    :param netmask: netmask parsed from the running config
    :return: True if ip_addr is one of gw_port's fixed IPs and netmask
        matches the corresponding subnet's netmask, else False
    """
    # stdlib replacement for netaddr; for IPv4 str(netmask) yields the
    # same dotted-quad form.  NOTE(review): assumes IPv4-style netmask
    # comparison — confirm behavior if IPv6 subnets are possible here.
    import ipaddress
    if gw_port is None:
        return False
    fixed_ip = next((fi for fi in gw_port['fixed_ips']
                     if fi['ip_address'] == ip_addr), None)
    if fixed_ip is None:
        LOG.info("Subintf real IP is incorrect, deleting")
        return False
    subnet = next(sn for sn in gw_port['subnets']
                  if sn['id'] == fixed_ip['subnet_id'])
    target_net = ipaddress.ip_network(subnet['cidr'], strict=False)
    if netmask != str(target_net.netmask):
        LOG.info("Subintf has incorrect netmask, deleting")
        return False
    return True
checks running-cfg derived ip_addr and netmask against neutron-db gw_port
2.460267
2.448006
1.005009
def retry(ExceptionToCheck, tries=4, delay=3, backoff=2):
    """Retry calling the decorated function using exponential backoff.

    Reference: http://www.saltycrane.com/blog/2009/11/trying-out-retry
    -decorator-python/

    :param ExceptionToCheck: the exception (or tuple of exceptions) that
        triggers a retry
    :param tries: number of times to try (not retry) before giving up
    :param delay: initial delay between retries in seconds
    :param backoff: backoff multiplier, e.g. 2 doubles the delay each
        retry
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    LOG.debug("%(err_mess)s. Retry calling function "
                              "'%(f_name)s' in %(delta)d seconds.",
                              {'err_mess': str(e), 'f_name': f.__name__,
                               'delta': mdelay})
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            # last attempt: let any exception propagate to the caller
            LOG.debug("Last retry calling function '%s'.", f.__name__)
            return f(*args, **kwargs)
        return f_retry  # true decorator
    return deco_retry
Retry calling the decorated function using an exponential backoff. Reference: http://www.saltycrane.com/blog/2009/11/trying-out-retry -decorator-python/ :param ExceptionToCheck: the exception to check. may be a tuple of exceptions to check :param tries: number of times to try (not retry) before giving up :param delay: initial delay between retries in seconds :param backoff: backoff multiplier e.g. value of 2 will double the delay each retry
2.004122
2.035353
0.984655
def _get_connection(self):
    """Make an SSH (NETCONF) connection to the IOS XE device.

    Uses the external ncclient library.  Existing live connections are
    reused.  On first connect, all non-management interfaces (disabled
    by default on boot) are enabled; that state is tracked in
    ``self._itfcs_enabled``.

    :return: connected ncclient manager instance
    :raises: cfg_exc.ConnectionException on any connection failure
    """
    try:
        if self._ncc_connection and self._ncc_connection.connected:
            return self._ncc_connection
        # ncclient needs 'name' to be 'csr' in order to communicate
        # with the device in the correct way.
        self._ncc_connection = manager.connect(
            host=self._host_ip, port=self._host_ssh_port,
            username=self._username, password=self._password,
            device_params={'name': "csr"}, timeout=self._timeout)
        if not self._itfcs_enabled:
            self._itfcs_enabled = self._enable_itfcs(
                self._ncc_connection)
        return self._ncc_connection
    except Exception as e:
        # str(e) instead of the py2-only e.message attribute
        conn_params = {'host': self._host_ip,
                       'port': self._host_ssh_port,
                       'user': self._username,
                       'timeout': self._timeout,
                       'reason': str(e)}
        raise cfg_exc.ConnectionException(**conn_params)
Make SSH connection to the IOS XE device. The external ncclient library is used for creating this connection. This method keeps state of any existing connections and reuses them if already connected. Also interfaces (except management) are typically disabled by default when it is booted. So if connecting for the first time, driver will enable all other interfaces and keep that status in the `_itfcs_enabled` flag.
3.478616
2.723422
1.277296
def _get_interfaces(self):
    """Get a list of GigabitEthernet interfaces on this hosting device.

    :return: list of interface names (e.g. 'GigabitEthernet1')
    """
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    itfcs_raw = parse.find_lines("^interface GigabitEthernet")
    # 'interface GigabitEthernetX' -> 'GigabitEthernetX'
    itfcs = [raw_if.strip().split(' ')[1] for raw_if in itfcs_raw]
    LOG.debug("Interfaces on hosting device: %s", itfcs)
    return itfcs
Get a list of interfaces on this hosting device. :return: List of the interfaces
5.541726
5.19025
1.067719
def _get_interface_ip(self, interface_name):
    """Get the ip address for an interface.

    :param interface_name: interface name as a string
    :return: ip address of the interface as a string, or None if the
        interface (or its ip address line) is not found
    """
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    children = parse.find_children("^interface %s" % interface_name)
    for line in children:
        if 'ip address' in line:
            # 'ip address <addr> <mask>' -> <addr>
            ip_address = line.strip().split(' ')[2]
            LOG.debug("IP Address:%s", ip_address)
            return ip_address
    LOG.warning("Cannot find interface: %s", interface_name)
    return None
Get the ip address for an interface. :param interface_name: interface_name as a string :return: ip address of interface as a string
3.763903
3.976063
0.946641
def _interface_exists(self, interface):
    """Check whether an interface exists in the running config.

    :param interface: interface name (regex-anchored after 'interface ')
    :return: True if at least one matching interface line is found
    """
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    itfcs_raw = parse.find_lines("^interface " + interface)
    return len(itfcs_raw) > 0
Check whether interface exists.
8.302159
7.440814
1.11576
def _get_vrfs(self):
    """Get the current VRFs configured in the device.

    :return: a list of vrf names as strings
    """
    vrfs = []
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    vrfs_raw = parse.find_lines("^vrf definition")
    for line in vrfs_raw:
        # raw format: 'vrf definition <vrf-name>'
        vrf_name = line.strip().split(' ')[2]
        vrfs.append(vrf_name)
    LOG.info("VRFs:%s", vrfs)
    return vrfs
Get the current VRFs configured in the device. :return: A list of vrf names as string
5.108826
5.129347
0.995999
def _get_capabilities(self):
    """Get the server's NETCONF capabilities.

    :return: list of server capability strings
    """
    conn = self._get_connection()
    capabilities = list(conn.server_capabilities)
    LOG.debug("Server capabilities: %s", capabilities)
    return capabilities
Get the servers NETCONF capabilities. :return: List of server capabilities.
3.580034
3.543658
1.010265
conn = self._get_connection() config = conn.get_config(source="running") if config: root = ET.fromstring(config._raw) running_config = root[0][0] if split is True: rgx = re.compile("\r*\n+") ioscfg = rgx.split(running_config.text) else: ioscfg = running_config.text return ioscfg
def _get_running_config(self, split=True)
Get the IOS XE device's current running config. :return: Current IOS running config as multiline string
3.583109
3.649361
0.981845
def _check_acl(self, acl_no, network, netmask):
    """Check that an ACL config exists in the running config.

    :param acl_no: access control list (ACL) number
    :param network: network which this ACL permits
    :param netmask: netmask of the network
    :return: True if the ACL exists with the expected permit line,
        False otherwise
    """
    exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
                     ' permit ' + str(network) + ' ' + str(netmask)]
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    acls_raw = parse.find_children(exp_cfg_lines[0])
    if acls_raw:
        if exp_cfg_lines[1] in acls_raw:
            return True
        # ACL exists but the permit line does not match what we expect
        LOG.error("Mismatch in ACL configuration for %s", acl_no)
        return False
    LOG.debug("%s is not present in config", acl_no)
    return False
Check a ACL config exists in the running config. :param acl_no: access control list (ACL) number :param network: network which this ACL permits :param netmask: netmask of the network :return:
4.188474
4.102592
1.020934
def _cfg_exists(self, cfg_str):
    """Check that a partial config string exists in the running config.

    :param cfg_str: config string to check (anchored at line start)
    :return: True if at least one matching line is found
    """
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    cfg_raw = parse.find_lines("^" + cfg_str)
    LOG.debug("_cfg_exists(): Found lines %s", cfg_raw)
    return len(cfg_raw) > 0
Check a partial config string exists in the running config. :param cfg_str: config string to check :return : True or False
6.470651
6.33736
1.021033
def caller_name(self, skip=2):
    """Get the name of a caller in the format module.class.method.

    :param skip: how many levels of stack to skip while getting the
        caller name.  skip=1 means "who calls me", skip=2 "who calls
        my caller", etc.
    :return: dotted caller name, or '' if skipped levels exceed the
        stack height
    """
    stack = inspect.stack()
    start = 0 + skip
    if len(stack) < start + 1:
        return ''
    parentframe = stack[start][0]
    name = []
    module = inspect.getmodule(parentframe)
    # `module` can be None when the frame is executed directly in a
    # console.  TODO(asr1kteam): consider using __main__ in that case.
    if module:
        name.append(module.__name__)
    # detect classname
    if 'self' in parentframe.f_locals:
        # There is no reliable way to distinguish a static-method call;
        # it will be reported as a plain function call.
        name.append(parentframe.f_locals['self'].__class__.__name__)
    codename = parentframe.f_code.co_name
    if codename != '<module>':  # top level usually
        name.append(codename)  # function or a method
    # drop the frame reference promptly to avoid reference cycles
    del parentframe
    return ".".join(name)
Get a name of a caller in the format module.class.method `skip` specifies how many levels of stack to skip while getting caller name. skip=1 means "who calls me", skip=2 "who calls my caller" etc. An empty string is returned if skipped levels exceed stack height
1.738573
1.614172
1.077068
def _check_response(self, rpc_obj, snippet_name, conf_str=None):
    """Check an RPC response object for edit-config status.

    A successful reply contains an ``<ok />`` element; anything else is
    treated as an error and the error type/tag are extracted from the
    reply for the raised exception.

    :param rpc_obj: ncclient RPCReply object
    :param snippet_name: name of the snippet that was executed
    :param conf_str: config string that was sent (for error reporting)
    :return: True if the config operation completed successfully
    :raises: cfg_exc.IOSXEConfigException on an error reply
    """
    LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s",
              {'snippet_name': snippet_name, 'rpc_obj': rpc_obj.xml})
    xml_str = rpc_obj.xml
    if "<ok />" in xml_str:
        LOG.info("%s was successfully executed", snippet_name)
        return True
    # Not OK: extract <error-type> and <error-tag> from the reply and
    # raise a configuration exception.
    e_type = rpc_obj._root[0][0].text
    e_tag = rpc_obj._root[0][1].text
    params = {'snippet': snippet_name, 'type': e_type, 'tag': e_tag,
              'dev_id': self.hosting_device['id'],
              'ip': self._host_ip, 'confstr': conf_str}
    raise cfg_exc.IOSXEConfigException(**params)
This function checks the rpc response object for status. This function takes as input the response rpc_obj and the snippet name that was executed. It parses it to see, if the last edit operation was a success or not. <?xml version="1.0" encoding="UTF-8"?> <rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c" xmlns="urn:ietf:params:netconf:base:1.0"> <ok /> </rpc-reply> In case of error, IOS XE device sends a response as follows. We take the error type and tag. <?xml version="1.0" encoding="UTF-8"?> <rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c" xmlns="urn:ietf:params:netconf:base:1.0"> <rpc-error> <error-type>protocol</error-type> <error-tag>operation-failed</error-tag> <error-severity>error</error-severity> </rpc-error> </rpc-reply> :return: True if the config operation completed successfully :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions. IOSXEConfigException
4.123865
3.366983
1.224795
def _get_instances_for_project(self, project_id):
    """Return all instances for a given project.

    :param project_id: UUID of project (tenant)
    :return: list of nova server objects
    :raises: nexc.ClientException on authorization failures
    """
    search_opts = {'marker': None,
                   'all_tenants': True,
                   'project_id': project_id}
    try:
        servers = self._novaclnt.servers.list(True, search_opts)
        LOG.debug('_get_instances_for_project: servers=%s', servers)
        return servers
    except nexc.Unauthorized:
        # NOTE: original code built emsg as a (format, params) tuple
        # and logged/raised the tuple; interpolate it instead.
        emsg = (_LE('Failed to get novaclient:Unauthorised '
                    'project_id=%(proj)s user=%(user)s') %
                {'proj': self._project_id, 'user': self._user_name})
        LOG.exception(emsg)
        raise nexc.ClientException(emsg)
    except nexc.AuthorizationFailure as err:
        emsg = _LE("Failed to get novaclient %s")
        LOG.exception(emsg, err)
        raise nexc.ClientException(emsg % err)
Return all instances for a given project. :project_id: UUID of project (tenant)
3.358633
3.449095
0.973772
def get_instance_for_uuid(self, uuid, project_id):
    """Return the instance name for a given instance uuid and project.

    Results are memoized in ``self._inst_info_cache`` keyed by
    (uuid, project_id).

    :param uuid: instance's UUID (with '-' separators stripped)
    :param project_id: UUID of project (tenant)
    :return: instance name, or None if not found
    """
    instance_name = self._inst_info_cache.get((uuid, project_id))
    if instance_name:
        return instance_name
    for inst in self._get_instances_for_project(project_id):
        # nova ids contain '-' separators; the lookup uuid does not
        if inst.id.replace('-', '') == uuid:
            LOG.debug('get_instance_for_uuid: name=%s', inst.name)
            instance_name = inst.name
            self._inst_info_cache[(uuid, project_id)] = instance_name
            return instance_name
    return instance_name
Return instance name for given uuid of an instance and project. :uuid: Instance's UUID :project_id: UUID of project (tenant)
2.219346
2.29879
0.965441
def l3_tenant_id(cls):
    """Return the id of the tenant owning hosting device resources.

    The value is resolved once (via keystone v3 when the auth config
    exposes 'project_domain_id', otherwise v2) and cached on the class.
    """
    if cls._l3_tenant_uuid is None:
        if hasattr(cfg.CONF.keystone_authtoken, 'project_domain_id'):
            # TODO(sridar): hack for now to determine if keystone v3
            # API is to be used.
            cls._l3_tenant_uuid = cls._get_tenant_id_using_keystone_v3()
        else:
            cls._l3_tenant_uuid = cls._get_tenant_id_using_keystone_v2()
    return cls._l3_tenant_uuid
Returns id of tenant owning hosting device resources.
3.189181
3.079879
1.035489
def mgmt_nw_id(cls):
    """Return the id of the management network (cached on the class).

    Also caches the id of the management network's first subnet.
    Returns None (without caching) when the tenant, network or subnet
    cannot be determined.
    """
    if cls._mgmt_nw_uuid is None:
        tenant_id = cls.l3_tenant_id()
        if not tenant_id:
            return
        net = bc.get_plugin().get_networks(
            bc.context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.management_network]},
            ['id', 'subnets'])
        if len(net) == 1:
            num_subnets = len(net[0]['subnets'])
            if num_subnets == 0:
                LOG.error('The management network has no subnet. '
                          'Please assign one.')
                return
            elif num_subnets > 1:
                LOG.info('The management network has %d subnets. The '
                         'first one will be used.', num_subnets)
            cls._mgmt_nw_uuid = net[0].get('id')
            cls._mgmt_subnet_uuid = net[0]['subnets'][0]
        elif len(net) > 1:
            # Management network must have a unique name.
            LOG.error('The management network does not have a unique '
                      'name. Please ensure that it has.')
        else:
            # Management network has not been created.
            LOG.error('There is no virtual management network. Please '
                      'create one.')
    return cls._mgmt_nw_uuid
Returns id of the management network.
2.914339
2.815511
1.035101
def mgmt_sec_grp_id(cls):
    """Return the id of the security group used by the management network.

    Returns None when the security-group extension is unsupported or
    the group cannot be uniquely determined.  The id is cached on the
    class after the first successful lookup.
    """
    if not extensions.is_extension_supported(
            bc.get_plugin(), "security-group"):
        return
    if cls._mgmt_sec_grp_id is None:
        # Get the id for the _mgmt_security_group_id
        tenant_id = cls.l3_tenant_id()
        res = bc.get_plugin().get_security_groups(
            bc.context.get_admin_context(),
            {'tenant_id': [tenant_id],
             'name': [cfg.CONF.general.default_security_group]},
            ['id'])
        if len(res) == 1:
            cls._mgmt_sec_grp_id = res[0].get('id', None)
        elif len(res) > 1:
            # the mgmt sec group must be unique.
            LOG.error('The security group for the management network '
                      'does not have a unique name. Please ensure that '
                      'it has.')
        else:
            # Service VM Mgmt security group is not present.
            LOG.error('There is no security group for the management '
                      'network. Please create one.')
    return cls._mgmt_sec_grp_id
Returns id of security group used by the management network.
3.597998
3.377669
1.065231
def get_hosting_device_driver(self, context, id):
    """Return the device driver for hosting device template with <id>.

    Drivers are imported lazily from the template's 'device_driver'
    attribute and cached in ``self._hosting_device_drivers``.

    :return: driver instance, or None if <id> is None or the driver
        could not be loaded
    """
    if id is None:
        return
    try:
        return self._hosting_device_drivers[id]
    except KeyError:
        try:
            template = self._get_hosting_device_template(context, id)
            self._hosting_device_drivers[id] = importutils.import_object(
                template['device_driver'])
        except (ImportError, TypeError, n_exc.NeutronException):
            LOG.exception("Error loading hosting device driver for "
                          "hosting device template %s", id)
        # returns None if the import above failed
        return self._hosting_device_drivers.get(id)
Returns device driver for hosting device template with <id>.
2.425104
2.251306
1.077199
def get_hosting_device_plugging_driver(self, context, id):
    """Return the plugging driver for hosting device template with <id>.

    Drivers are imported lazily from the template's 'plugging_driver'
    attribute and cached in ``self._plugging_drivers``.

    :return: driver instance, or None if <id> is None or the driver
        could not be loaded
    """
    if id is None:
        return
    try:
        return self._plugging_drivers[id]
    except KeyError:
        try:
            template = self._get_hosting_device_template(context, id)
            self._plugging_drivers[id] = importutils.import_object(
                template['plugging_driver'])
        except (ImportError, TypeError, n_exc.NeutronException):
            LOG.exception("Error loading plugging driver for hosting "
                          "device template %s", id)
        # returns None if the import above failed
        return self._plugging_drivers.get(id)
Returns plugging driver for hosting device template with <id>.
2.544239
2.329226
1.092311
def acquire_hosting_device_slots(self, context, hosting_device, resource,
                                 resource_type, resource_service, num,
                                 exclusive=False):
    """Assign <num> slots in <hosting_device> to logical <resource>.

    If exclusive is True the hosting device is bound to the resource's
    tenant.  Otherwise it is not bound to any tenant.

    :return: True if the allocation was granted, False otherwise
    """
    bound = hosting_device['tenant_bound']
    if ((bound is not None and bound != resource['tenant_id']) or
            (exclusive and not self._exclusively_used(
                context, hosting_device, resource['tenant_id']))):
        LOG.debug(
            'Rejecting allocation of %(num)d slots in tenant %(bound)s '
            'hosting device %(device)s to logical resource %(r_id)s due '
            'to exclusive use conflict.',
            {'num': num,
             'bound': 'unbound' if bound is None else bound + ' bound',
             'device': hosting_device['id'], 'r_id': resource['id']})
        return False
    with context.session.begin(subtransactions=True):
        res_info = {'resource': resource, 'type': resource_type,
                    'service': resource_service}
        slot_info, query = self._get_or_create_slot_allocation(
            context, hosting_device, res_info)
        if slot_info is None:
            LOG.debug('Rejecting allocation of %(num)d slots in hosting '
                      'device %(device)s to logical resource %(r_id)s',
                      {'num': num, 'device': hosting_device['id'],
                       'r_id': resource['id']})
            return False
        new_allocation = num + slot_info.num_allocated
        if hosting_device['template']['slot_capacity'] < new_allocation:
            LOG.debug('Rejecting allocation of %(num)d slots in '
                      'hosting device %(device)s to logical resource '
                      '%(r_id)s due to insufficent slot availability.',
                      {'num': num, 'device': hosting_device['id'],
                       'r_id': resource['id']})
            self._dispatch_pool_maintenance_job(
                hosting_device['template'])
            return False
        # handle any changes to exclusive usage by tenant
        if exclusive and bound is None:
            self._update_hosting_device_exclusivity(
                context, hosting_device, resource['tenant_id'])
            bound = resource['tenant_id']
        elif not exclusive and bound is not None:
            self._update_hosting_device_exclusivity(context,
                                                    hosting_device,
                                                    None)
            bound = None
        slot_info.num_allocated = new_allocation
        context.session.add(slot_info)
    self._dispatch_pool_maintenance_job(hosting_device['template'])
    # report success
    LOG.info('Allocated %(num)d additional slots in tenant %(bound)s'
             'bound hosting device %(hd_id)s. In total %(total)d '
             'slots are now allocated in that hosting device for '
             'logical resource %(r_id)s.',
             {'num': num,
              'bound': 'un-' if bound is None else bound + ' ',
              'total': new_allocation,
              'hd_id': hosting_device['id'], 'r_id': resource['id']})
    return True
Assign <num> slots in <hosting_device> to logical <resource>. If exclusive is True the hosting device is bound to the resource's tenant. Otherwise it is not bound to any tenant. Returns True if allocation was granted, False otherwise.
2.619653
2.602981
1.006405
def release_hosting_device_slots(self, context, hosting_device, resource,
                                 num):
    """Free <num> slots in <hosting_device> from logical <resource>.

    A negative <num> releases all slots allocated to the resource in
    the hosting device.  If the device ends up with no allocations at
    all, any tenant binding is removed.

    :return: True if the deallocation was successful, False otherwise
    """
    with context.session.begin(subtransactions=True):
        num_str = str(num) if num >= 0 else "all"
        res_info = {'resource': resource}
        slot_info, query = self._get_or_create_slot_allocation(
            context, hosting_device, res_info, create=False)
        if slot_info is None:
            LOG.debug('Rejecting de-allocation of %(num)s slots in '
                      'hosting device %(device)s for logical resource '
                      '%(id)s',
                      {'num': num_str, 'device': hosting_device['id'],
                       'id': resource['id']})
            return False
        if num >= 0:
            new_allocation = slot_info.num_allocated - num
        else:
            # a negative num means: remove all slot allocations for the
            # logical resource in this hosting device
            new_allocation = 0
        if new_allocation < 0:
            LOG.debug('Rejecting de-allocation of %(num)s slots in '
                      'hosting device %(device)s for logical resource '
                      '%(id)s since only %(alloc)d slots are allocated.',
                      {'num': num_str, 'device': hosting_device['id'],
                       'id': resource['id'],
                       'alloc': slot_info.num_allocated})
            self._dispatch_pool_maintenance_job(
                hosting_device['template'])
            return False
        elif new_allocation == 0:
            result = query.delete()
            LOG.info('De-allocated %(num)s slots from hosting device '
                     '%(hd_id)s. %(total)d slots are now allocated in '
                     'that hosting device.',
                     {'num': num_str, 'total': new_allocation,
                      'hd_id': hosting_device['id']})
            if (hosting_device['tenant_bound'] is not None and
                    context.session.query(
                        hd_models.SlotAllocation).filter_by(
                        hosting_device_id=hosting_device['id']).first()
                    is None):
                # make hosting device tenant unbound if no logical
                # resource use it anymore
                hosting_device['tenant_bound'] = None
                context.session.add(hosting_device)
                LOG.info('Making hosting device %(hd_id)s with no '
                         'allocated slots tenant unbound.',
                         {'hd_id': hosting_device['id']})
            self._dispatch_pool_maintenance_job(
                hosting_device['template'])
            return result == 1
        LOG.info('De-allocated %(num)s slots from hosting device '
                 '%(hd_id)s. %(total)d slots are now allocated in '
                 'that hosting device.',
                 {'num': num_str, 'total': new_allocation,
                  'hd_id': hosting_device['id']})
        slot_info.num_allocated = new_allocation
        context.session.add(slot_info)
    self._dispatch_pool_maintenance_job(hosting_device['template'])
    # report success
    return True
Free <num> slots in <hosting_device> from logical resource <id>. Returns True if deallocation was successful. False otherwise.
2.456219
2.410308
1.019048
def get_hosting_devices_qry(self, context, hosting_device_ids,
                            load_agent=True):
    """Return a query for hosting devices with <hosting_device_ids>.

    :param hosting_device_ids: non-empty list of hosting device ids
    :param load_agent: if True, eagerly load the cfg_agent relation
    :return: sqlalchemy query object
    """
    query = context.session.query(hd_models.HostingDevice)
    if load_agent:
        query = query.options(joinedload('cfg_agent'))
    if len(hosting_device_ids) > 1:
        query = query.filter(hd_models.HostingDevice.id.in_(
            hosting_device_ids))
    else:
        # single-id equality comparison is cheaper than IN
        query = query.filter(
            hd_models.HostingDevice.id == hosting_device_ids[0])
    return query
Returns hosting devices with <hosting_device_ids>.
1.860411
1.839182
1.011542
def delete_all_hosting_devices(self, context, force_delete=False):
    """Delete all hosting devices, template by template.

    :param force_delete: if True, also delete devices whose life cycle
        is not managed by the device manager
    """
    for item in self._get_collection_query(
            context, hd_models.HostingDeviceTemplate):
        self.delete_all_hosting_devices_by_template(
            context, template=item, force_delete=force_delete)
Deletes all hosting devices.
5.651994
5.584521
1.012082
def delete_all_hosting_devices_by_template(self, context, template,
                                           force_delete=False):
    """Delete all hosting devices based on <template>.

    Devices with auto_delete unset are skipped unless force_delete is
    True.  For each deleted device its plugging resources (and, for VM
    devices, the service VM itself) and slot allocations are removed.
    """
    plugging_drv = self.get_hosting_device_plugging_driver(
        context, template['id'])
    hosting_device_drv = self.get_hosting_device_driver(context,
                                                        template['id'])
    if plugging_drv is None or hosting_device_drv is None:
        # cannot clean up resources without both drivers
        return
    is_vm = template['host_category'] == VM_CATEGORY
    query = context.session.query(hd_models.HostingDevice)
    query = query.filter(
        hd_models.HostingDevice.template_id == template['id'])
    for hd in query:
        if not (hd.auto_delete or force_delete):
            # device manager is not responsible for life cycle
            # management of this hosting device.
            continue
        res = plugging_drv.get_hosting_device_resources(
            context, hd.id, hd.complementary_id, self.l3_tenant_id(),
            self.mgmt_nw_id())
        if is_vm:
            self.svc_vm_mgr.delete_service_vm(context, hd.id)
        plugging_drv.delete_hosting_device_resources(
            context, self.l3_tenant_id(), **res)
        with context.session.begin(subtransactions=True):
            # remove all allocations in this hosting device
            context.session.query(hd_models.SlotAllocation).filter_by(
                hosting_device_id=hd['id']).delete()
            context.session.delete(hd)
Deletes all hosting devices based on <template>.
3.210438
3.265415
0.983164
def get_device_info_for_agent(self, context, hosting_device_db):
    """Return information about <hosting_device> needed by config agent.

    Convenience function that service plugins can use to populate
    their resources with information about the device hosting their
    logical resource.

    :param hosting_device_db: hosting device DB object
    :return: dict of device info for the agent
    """
    template = hosting_device_db.template
    mgmt_port = hosting_device_db.management_port
    # prefer the management port's fixed IP; fall back to the statically
    # configured management address when no port exists
    mgmt_ip = (mgmt_port['fixed_ips'][0]['ip_address'] if mgmt_port
               else hosting_device_db.management_ip_address)
    return {'id': hosting_device_db.id,
            'name': template.name,
            'template_id': template.id,
            'credentials': self._get_credentials(hosting_device_db),
            'host_category': template.host_category,
            'admin_state_up': hosting_device_db.admin_state_up,
            'service_types': template.service_types,
            'management_ip_address': mgmt_ip,
            'protocol_port': hosting_device_db.protocol_port,
            'timeout': None,
            'created_at': str(hosting_device_db.created_at),
            'status': hosting_device_db.status,
            'booting_time': template.booting_time}
Returns information about <hosting_device> needed by config agent. Convenience function that service plugins can use to populate their resources with information about the device hosting their logical resource.
2.304653
2.398803
0.960752
def _process_non_responsive_hosting_device(self, context, hosting_device):
    """Host type specific processing of non responsive hosting devices.

    :param hosting_device: db object for hosting device
    :return: True if hosting_device has been deleted, otherwise False
    """
    is_managed_vm = (
        hosting_device['template']['host_category'] == VM_CATEGORY and
        hosting_device['auto_delete'])
    if not is_managed_vm:
        # Not a VM under our life-cycle management; leave it alone.
        return False
    self._delete_dead_service_vm_hosting_device(context, hosting_device)
    return True
7.661079
8.469421
0.904558
def _maintain_hosting_device_pool(self, context, template):
    """Maintains the pool of hosting devices that are based on <template>.

    Ensures that the number of standby hosting devices (essentially
    service VMs) is kept at a suitable level so that resource creation
    is not slowed down by booting of the hosting device.

    :param context: context for this operation
    :param template: db object for hosting device template
    """
    #TODO(bobmel): Support HA/load-balanced Neutron servers:
    #TODO(bobmel): Locking across multiple running Neutron server instances
    lock = self._get_template_pool_lock(template['id'])
    # Non-blocking acquire: if maintenance is already running for this
    # template we simply skip this round.
    acquired = lock.acquire(False)
    if not acquired:
        # pool maintenance for this template already ongoing, so abort
        return
    try:
        # Maintain a pool of approximately 'desired_slots_free' available
        # for allocation. Approximately means that
        # abs(desired_slots_free-capacity) <= available_slots <=
        # desired_slots_free+capacity
        capacity = template['slot_capacity']
        if capacity == 0:
            # zero-capacity template can never contribute slots
            return
        desired = template['desired_slots_free']
        available = self._get_total_available_slots(
            context, template['id'], capacity)
        grow_threshold = abs(desired - capacity)
        if available <= grow_threshold:
            # Pool is (nearly) depleted: boot enough VMs to refill it.
            num_req = int(math.ceil(grow_threshold / (1.0 * capacity)))
            num_created = len(self._create_svc_vm_hosting_devices(
                context, num_req, template))
            if num_created < num_req:
                LOG.warning('Requested %(requested)d instances based '
                            'on hosting device template %(template)s '
                            'but could only create %(created)d '
                            'instances',
                            {'requested': num_req,
                             'template': template['id'],
                             'created': num_created})
        elif available >= desired + capacity:
            # Pool has at least one whole device of excess capacity:
            # tear down idle service VMs.
            num_req = int(
                math.floor((available - desired) / (1.0 * capacity)))
            num_deleted = self._delete_idle_service_vm_hosting_devices(
                context, num_req, template)
            if num_deleted < num_req:
                LOG.warning('Tried to delete %(requested)d instances '
                            'based on hosting device template '
                            '%(template)s but could only delete '
                            '%(deleted)d instances',
                            {'requested': num_req,
                             'template': template['id'],
                             'deleted': num_deleted})
    finally:
        lock.release()
3.813218
3.721
1.024783
def _create_svc_vm_hosting_devices(self, context, num, template):
    """Creates <num> or less service VM instances based on <template>.

    These hosting devices can be bound to a certain tenant or for
    shared use. A list with the created hosting device VMs is returned.
    """
    hosting_devices = []
    template_id = template['id']
    credentials_id = template['default_credentials_id']
    plugging_drv = self.get_hosting_device_plugging_driver(context,
                                                           template_id)
    hosting_device_drv = self.get_hosting_device_driver(context,
                                                        template_id)
    if plugging_drv is None or hosting_device_drv is None or num <= 0:
        return hosting_devices
    #TODO(bobmel): Determine value for max_hosted properly
    max_hosted = 1  # template['slot_capacity']
    dev_data, mgmt_context = self._get_resources_properties_for_hd(
        template, credentials_id)
    credentials_info = self._credentials.get(credentials_id)
    if credentials_info is None:
        # NOTE(review): message is missing a space between 'device'
        # and 'template' -- left as-is here (documentation-only change).
        LOG.error('Could not find credentials for hosting device'
                  'template %s. Aborting VM hosting device creation.',
                  template_id)
        return hosting_devices
    connectivity_info = self._get_mgmt_connectivity_info(
        context, self.mgmt_subnet_id())
    for i in range(num):
        complementary_id = uuidutils.generate_uuid()
        res = plugging_drv.create_hosting_device_resources(
            context, complementary_id, self.l3_tenant_id(),
            mgmt_context, max_hosted)
        if res.get('mgmt_port') is None:
            # Required ports could not be created
            return hosting_devices
        connectivity_info['mgmt_port'] = res['mgmt_port']
        vm_instance = self.svc_vm_mgr.dispatch_service_vm(
            context, template['name'] + '_nrouter', template['image'],
            template['flavor'], hosting_device_drv, credentials_info,
            connectivity_info, res.get('ports'))
        if vm_instance is not None:
            # Register the successfully booted VM as a hosting device.
            dev_data.update(
                {'id': vm_instance['id'],
                 'complementary_id': complementary_id,
                 'management_ip_address': res['mgmt_port'][
                     'fixed_ips'][0]['ip_address'],
                 'management_port_id': res['mgmt_port']['id']})
            self.create_hosting_device(context,
                                       {'hosting_device': dev_data})
            hosting_devices.append(vm_instance)
        else:
            # Fundamental error like could not contact Nova
            # Cleanup anything we created
            plugging_drv.delete_hosting_device_resources(
                context, self.l3_tenant_id(), **res)
            break
    LOG.info('Created %(num)d hosting device VMs based on template '
             '%(t_id)s', {'num': len(hosting_devices),
                          't_id': template_id})
    return hosting_devices
3.671977
3.654273
1.004845
def _delete_idle_service_vm_hosting_devices(self, context, num, template):
    """Deletes <num> or less unused <template>-based service VM instances.

    The number of deleted service vm instances is returned.
    """
    # Delete the "youngest" hosting devices since they are more likely
    # not to have finished booting
    num_deleted = 0
    plugging_drv = self.get_hosting_device_plugging_driver(context,
                                                           template['id'])
    hosting_device_drv = self.get_hosting_device_driver(context,
                                                        template['id'])
    if plugging_drv is None or hosting_device_drv is None or num <= 0:
        return num_deleted
    # Candidates: admin-up, tenant-unbound, auto-delete devices with
    # zero slot allocations (i.e. completely idle).
    query = context.session.query(hd_models.HostingDevice)
    query = query.outerjoin(
        hd_models.SlotAllocation,
        hd_models.HostingDevice.id ==
        hd_models.SlotAllocation.hosting_device_id)
    query = query.filter(hd_models.HostingDevice.template_id ==
                         template['id'],
                         hd_models.HostingDevice.admin_state_up ==
                         expr.true(),
                         hd_models.HostingDevice.tenant_bound ==
                         expr.null(),
                         hd_models.HostingDevice.auto_delete ==
                         expr.true())
    query = query.group_by(hd_models.HostingDevice.id).having(
        func.count(hd_models.SlotAllocation.logical_resource_id) == 0)
    # Youngest first, so devices still booting are reclaimed first.
    query = query.order_by(
        hd_models.HostingDevice.created_at.desc(),
        func.count(hd_models.SlotAllocation.logical_resource_id))
    hd_candidates = query.all()
    num_possible_to_delete = min(len(hd_candidates), num)
    for i in range(num_possible_to_delete):
        res = plugging_drv.get_hosting_device_resources(
            context, hd_candidates[i]['id'],
            hd_candidates[i]['complementary_id'], self.l3_tenant_id(),
            self.mgmt_nw_id())
        if self.svc_vm_mgr.delete_service_vm(context,
                                             hd_candidates[i]['id']):
            with context.session.begin(subtransactions=True):
                context.session.delete(hd_candidates[i])
            plugging_drv.delete_hosting_device_resources(
                context, self.l3_tenant_id(), **res)
            num_deleted += 1
    LOG.info('Deleted %(num)d hosting devices based on template '
             '%(t_id)s', {'num': num_deleted, 't_id': template['id']})
    return num_deleted
2.66402
2.676384
0.99538
def _delete_dead_service_vm_hosting_device(self, context, hosting_device):
    """Deletes a presumably dead <hosting_device> service VM.

    This will indirectly make all of its hosted resources unscheduled.
    """
    if hosting_device is None:
        return
    plugging_drv = self.get_hosting_device_plugging_driver(
        context, hosting_device['template_id'])
    hosting_device_drv = self.get_hosting_device_driver(
        context, hosting_device['template_id'])
    # Both drivers are required to release the device's resources.
    if plugging_drv is None or hosting_device_drv is None:
        return
    res = plugging_drv.get_hosting_device_resources(
        context, hosting_device['id'],
        hosting_device['complementary_id'], self.l3_tenant_id(),
        self.mgmt_nw_id())
    if not self.svc_vm_mgr.delete_service_vm(context,
                                             hosting_device['id']):
        # Best effort: proceed with un-registration even if the VM
        # could not be deleted (it is presumed dead anyway).
        LOG.error('Failed to delete hosting device %s service VM. '
                  'Will un-register it anyway.',
                  hosting_device['id'])
    plugging_drv.delete_hosting_device_resources(
        context, self.l3_tenant_id(), **res)
    with context.session.begin(subtransactions=True):
        # remove all allocations in this hosting device
        context.session.query(hd_models.SlotAllocation).filter_by(
            hosting_device_id=hosting_device['id']).delete()
        context.session.delete(hosting_device)
3.081045
3.094648
0.995604
def _get_total_available_slots(self, context, template_id, capacity):
    """Returns available slots in idle devices based on <template_id>.

    Only slots in tenant unbound hosting devices are counted to ensure
    there is always hosting device slots available regardless of tenant.
    """
    query = context.session.query(hd_models.HostingDevice.id)
    query = query.outerjoin(
        hd_models.SlotAllocation,
        hd_models.HostingDevice.id == hd_models.SlotAllocation
        .hosting_device_id)
    query = query.filter(
        hd_models.HostingDevice.template_id == template_id,
        hd_models.HostingDevice.admin_state_up == expr.true(),
        hd_models.HostingDevice.tenant_bound == expr.null())
    query = query.group_by(hd_models.HostingDevice.id)
    # SUM over an outer join with no matching allocation rows yields
    # NULL, so this HAVING clause keeps only devices with no slot
    # allocations at all (fully idle devices).
    # NOTE(review): relies on SQLAlchemy rendering '== expr.null()' as
    # 'IS NULL' -- confirm against the SQLAlchemy version in use.
    query = query.having(
        func.sum(hd_models.SlotAllocation.num_allocated) == expr.null())
    num_hosting_devices = query.count()
    # Every fully idle device contributes its full slot capacity.
    return num_hosting_devices * capacity
2.553961
2.24298
1.138646
def _exclusively_used(self, context, hosting_device, tenant_id):
    """Checks if only <tenant_id>'s resources use <hosting_device>."""
    query = context.session.query(hd_models.SlotAllocation)
    query = query.filter(
        hd_models.SlotAllocation.hosting_device_id ==
        hosting_device['id'],
        hd_models.SlotAllocation.logical_resource_owner != tenant_id)
    # Exclusive iff no allocation on this device is owned by any
    # other tenant.
    return query.first() is None
4.43825
4.185892
1.060288
def _update_hosting_device_exclusivity(self, context, hosting_device,
                                       tenant_id):
    """Make <hosting device> bound or unbound to <tenant_id>.

    If <tenant_id> is None the device is unbound, otherwise it gets
    bound to that <tenant_id>.
    """
    with context.session.begin(subtransactions=True):
        hosting_device['tenant_bound'] = tenant_id
        context.session.add(hosting_device)
        # Propagate the binding to every slot allocation on the device.
        allocations = context.session.query(
            hd_models.SlotAllocation).filter_by(
                hosting_device_id=hosting_device['id'])
        for allocation in allocations:
            allocation['tenant_bound'] = tenant_id
            context.session.add(allocation)
2.925262
2.954373
0.990147
try: return self._hosting_device_locks[id] except KeyError: self._hosting_device_locks[id] = threading.Lock() return self._hosting_device_locks.get(id)
def _get_template_pool_lock(self, id)
Returns lock object for hosting device template with <id>.
3.025875
2.46007
1.229995
def _create_hosting_device_templates_from_config(self):
    """To be called late during plugin initialization so that any
    hosting device templates defined in the config file is properly
    inserted in the DB.
    """
    hdt_dict = config.get_specific_config('cisco_hosting_device_template')
    attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[
        ciscohostingdevicemanager.DEVICE_TEMPLATES]
    adm_context = bc.context.get_admin_context()
    for hdt_uuid, kv_dict in hdt_dict.items():
        # ensure hdt_uuid is properly formatted
        hdt_uuid = config.uuidify(hdt_uuid)
        # Probe the DB to decide between create and update.
        try:
            self.get_hosting_device_template(adm_context, hdt_uuid)
            is_create = False
        except ciscohostingdevicemanager.HostingDeviceTemplateNotFound:
            is_create = True
        kv_dict['id'] = hdt_uuid
        kv_dict['tenant_id'] = self.l3_tenant_id()
        config.verify_resource_dict(kv_dict, True, attr_info)
        hdt = {ciscohostingdevicemanager.DEVICE_TEMPLATE: kv_dict}
        try:
            if is_create:
                self.create_hosting_device_template(adm_context, hdt)
            else:
                self.update_hosting_device_template(adm_context,
                                                    kv_dict['id'], hdt)
        except n_exc.NeutronException:
            # Re-raise after logging so startup fails loudly on a bad
            # template definition.
            with excutils.save_and_reraise_exception():
                LOG.error('Invalid hosting device template definition '
                          'in configuration file for template = %s',
                          hdt_uuid)
3.060313
2.964967
1.032157
def _create_hosting_devices_from_config(self):
    """To be called late during plugin initialization so that any
    hosting device specified in the config file is properly inserted
    in the DB.
    """
    hd_dict = config.get_specific_config('cisco_hosting_device')
    attr_info = ciscohostingdevicemanager.RESOURCE_ATTRIBUTE_MAP[
        ciscohostingdevicemanager.DEVICES]
    adm_context = bc.context.get_admin_context()
    for hd_uuid, kv_dict in hd_dict.items():
        # ensure hd_uuid is properly formatted
        hd_uuid = config.uuidify(hd_uuid)
        # Probe the DB to decide between create and update; keep the
        # old record around to preserve agent/port assignments.
        try:
            old_hd = self.get_hosting_device(adm_context, hd_uuid)
            is_create = False
        except ciscohostingdevicemanager.HostingDeviceNotFound:
            old_hd = {}
            is_create = True
        kv_dict['id'] = hd_uuid
        kv_dict['tenant_id'] = self.l3_tenant_id()
        # make sure we keep using same config agent if it has been assigned
        kv_dict['cfg_agent_id'] = old_hd.get('cfg_agent_id')
        # make sure we keep using management port if it exists
        kv_dict['management_port_id'] = old_hd.get('management_port_id')
        config.verify_resource_dict(kv_dict, True, attr_info)
        hd = {ciscohostingdevicemanager.DEVICE: kv_dict}
        try:
            if is_create:
                self.create_hosting_device(adm_context, hd)
            else:
                self.update_hosting_device(adm_context, kv_dict['id'], hd)
        except n_exc.NeutronException:
            # Re-raise after logging so startup fails loudly on a bad
            # device specification.
            with excutils.save_and_reraise_exception():
                LOG.error('Invalid hosting device specification in '
                          'configuration file for device = %s',
                          hd_uuid)
3.510669
3.417238
1.027341
def add_router_to_hosting_device(self, client, hosting_device_id, body):
    """Adds a router to hosting device."""
    # Build the device-scoped routers URL, then POST the router body.
    path = (hostingdevice.HostingDevice.resource_path +
            DEVICE_L3_ROUTERS) % hosting_device_id
    return client.post(path, body=body)
14.276155
14.980394
0.952989
def remove_router_from_hosting_device(self, client, hosting_device_id,
                                      router_id):
    """Remove a router from hosting_device."""
    # Build the URL for this specific router on the device and DELETE it.
    path = (hostingdevice.HostingDevice.resource_path +
            DEVICE_L3_ROUTERS + "/%s") % (hosting_device_id, router_id)
    return client.delete(path)
8.752613
8.599152
1.017846
def list_routers_on_hosting_device(self, client, hosting_device_id,
                                   **_params):
    """Fetches a list of routers hosted on a hosting device."""
    # GET on the device-scoped routers collection.
    path = (hostingdevice.HostingDevice.resource_path +
            DEVICE_L3_ROUTERS) % hosting_device_id
    return client.get(path, params=_params)
16.786625
13.747005
1.221111
def list_hosting_devices_hosting_routers(self, client, router_id,
                                         **_params):
    """Fetches a list of hosting devices hosting a router."""
    # GET on the router-scoped hosting devices collection.
    path = (client.router_path + L3_ROUTER_DEVICES) % router_id
    return client.get(path, params=_params)
28.975754
21.943768
1.320455
def setup_client_rpc(self):
    """Setup RPC client for dfa agent."""
    # Create the client that talks to the DFA server queue.
    self.clnt = rpc.DfaRpcClient(
        self._url,
        constants.DFA_SERVER_QUEUE,
        exchange=constants.DFA_EXCHANGE)
11.10524
7.170763
1.548683
def setup_rpc(self):
    """Setup RPC server for dfa agent."""
    # Endpoints dispatch incoming RPC calls to the VDP manager and
    # IP-table driver handlers.
    callbacks = RpcCallBacks(self._vdpm, self._iptd)
    self.server = rpc.DfaRpcServer(
        self._qn, self._my_host, self._url, callbacks,
        exchange=constants.DFA_EXCHANGE)
24.94968
17.785948
1.402775
def _add_rid_to_vrf_list(self, ri):
    """Add router ID to a VRF list.

    In order to properly manage VRFs in the ASR, their usage has to be
    tracked.  VRFs are provided with neutron router objects in their
    hosting_info fields of the gateway ports.  This means that the VRF
    is only available when the gateway port of the router is set.  VRFs
    can span routers, and even OpenStack tenants, so lists of routers
    that belong to the same VRF are kept in a dictionary, with the VRF
    name as the key.
    """
    # VRF info only becomes available once the router has a gateway port.
    if ri.ex_gw_port or ri.router.get('gw_port'):
        driver = self.driver_manager.get_driver(ri.id)
        vrf_name = driver._get_vrf_name(ri)
        if not vrf_name:
            return
        if not self._router_ids_by_vrf.get(vrf_name):
            # First router seen in this VRF: create it on the device.
            LOG.debug("++ CREATING VRF %s" % vrf_name)
            driver._do_create_vrf(vrf_name)
        self._router_ids_by_vrf.setdefault(vrf_name, set()).add(
            ri.router['id'])
3.149271
3.12317
1.008357
def _remove_rid_from_vrf_list(self, ri):
    """Remove router ID from a VRF list.

    This removes a router from the list of routers that's kept in a
    map, using a VRF ID as the key.  If the VRF exists, the router is
    removed from the list if it's present.  If the last router in the
    list is removed, then the driver's method to remove the VRF is
    called and the map entry for that VRF is deleted.
    """
    # Only routers with a gateway port participate in VRF tracking.
    if ri.ex_gw_port or ri.router.get('gw_port'):
        driver = self.driver_manager.get_driver(ri.id)
        vrf_name = driver._get_vrf_name(ri)
        if self._router_ids_by_vrf.get(vrf_name) and (
                ri.router['id'] in self._router_ids_by_vrf[vrf_name]):
            self._router_ids_by_vrf[vrf_name].remove(ri.router['id'])
            # If this is the last router in a VRF, then we can safely
            # delete the VRF from the router config (handled by the driver)
            if not self._router_ids_by_vrf.get(vrf_name):
                LOG.debug("++ REMOVING VRF %s" % vrf_name)
                driver._remove_vrf(ri)
                del self._router_ids_by_vrf[vrf_name]
2.807316
2.762153
1.016351
def _internal_network_removed(self, ri, port, ex_gw_port):
    """Remove an internal router port.

    Check to see if this is the last port to be removed for a given
    network scoped by a VRF (note: there can be different mappings
    between VRFs and networks -- 1-to-1, 1-to-n, n-to-1, n-to-n --
    depending on the configuration and workflow used).  If it is the
    last port, set the flag indicating that the internal sub-interface
    for that network on the ASR should be deleted.
    """
    itfc_deleted = False
    driver = self.driver_manager.get_driver(ri.id)
    vrf_name = driver._get_vrf_name(ri)
    network_name = ex_gw_port['hosting_info'].get('network_name')
    # Only act if this router is actually tracked under this VRF/network.
    if self._router_ids_by_vrf_and_ext_net.get(
            vrf_name, {}).get(network_name) and (
            ri.router['id'] in
            self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):
        # If this is the last port for this neutron router,
        # then remove this router from the list
        if len(ri.internal_ports) == 1 and port in ri.internal_ports:
            self._router_ids_by_vrf_and_ext_net[
                vrf_name][network_name].remove(ri.router['id'])
            # Check if any other routers in this VRF have this network,
            # and if not, set the flag to remove the interface
            if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(
                    network_name):
                LOG.debug("++ REMOVING NETWORK %s" % network_name)
                itfc_deleted = True
                del self._router_ids_by_vrf_and_ext_net[
                    vrf_name][network_name]
                # Drop the whole VRF entry once its last network is gone.
                if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):
                    del self._router_ids_by_vrf_and_ext_net[vrf_name]
    driver.internal_network_removed(ri, port, itfc_deleted=itfc_deleted)
    if ri.snat_enabled and ex_gw_port:
        driver.disable_internal_network_NAT(ri, port, ex_gw_port,
                                            itfc_deleted=itfc_deleted)
2.578197
2.57551
1.001043
def do_list_organizations(self, line):
    """Get list of organization on DCNM."""
    orgs = self.dcnm_client.list_organizations()
    if not orgs:
        print('No organization found.')
        return
    # Render one row per organization name.
    table = PrettyTable(['Organization Name'])
    for entry in orgs:
        table.add_row([entry['organizationName']])
    print(table)
3.282036
2.685405
1.222176
def create_routertype(self, context, routertype):
    """Creates a router type.

    Also binds it to the specified hosting device template.
    """
    LOG.debug("create_routertype() called. Contents %s", routertype)
    rt = routertype['routertype']
    # Columns copied verbatim from the request dict into the DB row.
    copied_fields = ('tenant_id', 'name', 'description', 'template_id',
                     'ha_enabled_by_default', 'shared', 'slot_need',
                     'scheduler', 'driver', 'cfg_agent_service_helper',
                     'cfg_agent_driver')
    with context.session.begin(subtransactions=True):
        column_values = {field: rt[field] for field in copied_fields}
        column_values['id'] = self._get_id(rt)
        routertype_db = l3_models.RouterType(**column_values)
        context.session.add(routertype_db)
        return self._make_routertype_dict(routertype_db)
2.339861
2.382188
0.982232
def associate_hosting_device_with_config_agent(
        self, client, config_agent_id, body):
    """Associates a hosting_device with a config agent."""
    # POST to the agent-scoped hosting devices collection.
    path = (ConfigAgentHandlingHostingDevice.resource_path +
            CFG_AGENT_HOSTING_DEVICES) % config_agent_id
    return client.post(path, body=body)
13.003536
14.607141
0.890218
def disassociate_hosting_device_with_config_agent(
        self, client, config_agent_id, hosting_device_id):
    """Disassociates a hosting_device with a config agent."""
    # DELETE the specific hosting device under this agent.
    path = (ConfigAgentHandlingHostingDevice.resource_path +
            CFG_AGENT_HOSTING_DEVICES + "/%s") % (config_agent_id,
                                                  hosting_device_id)
    return client.delete(path)
10.895055
12.038715
0.905001
def list_hosting_device_handled_by_config_agent(
        self, client, cfg_agent_id, **_params):
    """Fetches a list of hosting devices handled by a config agent."""
    # GET on the agent-scoped hosting devices collection.
    path = (ConfigAgentHandlingHostingDevice.resource_path +
            CFG_AGENT_HOSTING_DEVICES) % cfg_agent_id
    return client.get(path, params=_params)
13.739067
12.051676
1.140013