code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
tenant_name = fw_dict.get('tenant_name')
try:
net, subnet = self._create_os_nwk(tenant_id, tenant_name, "in",
is_fw_virt=is_fw_virt)
if net is None or subnet is None:
return False
except Exception as exc:
# If Openstack network creation fails, IP address is released.
# Seg, VLAN creation happens only after network creation in
# Openstack is successful.
LOG.error("Creation of In Openstack Network failed tenant "
"%(tenant)s, Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
return False
ret = fw_const.OS_IN_NETWORK_CREATE_SUCCESS
net_dict = self.retrieve_dcnm_net_info(tenant_id, "in")
subnet_dict = self.retrieve_dcnm_subnet_info(tenant_id, "in")
# Very unlikely case, so nothing released.
if not net_dict or not subnet_dict:
LOG.error("Allocation of net,subnet failed Len net %(len_net)s"
"sub %(len_sub)s",
{'len_net': len(net_dict), 'len_sub': len(subnet_dict)})
ret = fw_const.OS_IN_NETWORK_CREATE_FAIL
# Updating the FW and Nwk DB
self.store_net_fw_db(tenant_id, net, net_dict, subnet_dict,
"in", 'SUCCESS', os_status=ret)
return True | def create_os_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Create the Openstack IN network and stores the values in DB. | 4.178375 | 4.012586 | 1.041317 |
serv_obj = self.get_service_obj(tenant_id)
fw_dict = serv_obj.get_fw_dict()
fw_id = fw_dict.get('fw_id')
fw_data, fw_data_dict = self.get_fw(fw_id)
if fw_data is None:
LOG.error("Unable to get fw_data for tenant %s", tenant_name)
return False
if direc == 'in':
net_id = fw_data.in_network_id
seg, vlan = self.get_in_seg_vlan(tenant_id)
subnet_dict = self.get_in_ip_addr(tenant_id)
else:
net_id = fw_data.out_network_id
seg, vlan = self.get_out_seg_vlan(tenant_id)
subnet_dict = self.get_out_ip_addr(tenant_id)
# Delete the Openstack Network
sub = subnet_dict.get('subnet')
try:
ret = self.os_helper.delete_network_all_subnets(net_id)
if not ret:
LOG.error("Delete network for ID %(net)s direct %(dir)s "
"failed", {'net': net_id, 'dir': direc})
return False
except Exception as exc:
LOG.error("Delete network for ID %(net)s direct %(dir)s failed"
" Exc %(exc)s",
{'net': net_id, 'dir': direc, 'exc': exc})
return False
# Release the segment, VLAN and subnet allocated
if not is_fw_virt:
self.service_vlans.release_segmentation_id(vlan)
self.service_segs.release_segmentation_id(seg)
self.release_subnet(sub, direc)
# Release the network DB entry
self.delete_network_db(net_id)
return True | def _delete_os_nwk(self, tenant_id, tenant_name, direc, is_fw_virt=False) | Delete the network created in Openstack.
Function to delete the Openstack network. It also releases the associated
segmentation, VLAN and subnets. | 2.589296 | 2.45012 | 1.056804 |
ret = True
tenant_name = fw_dict.get('tenant_name')
try:
ret = self._delete_os_nwk(tenant_id, tenant_name, "in",
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Deletion of In Openstack Network failed tenant "
"%(tenant)s Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
ret = False
# Updating the FW DB
if ret:
res = fw_const.OS_IN_NETWORK_DEL_SUCCESS
else:
res = fw_const.OS_IN_NETWORK_DEL_FAIL
self.update_fw_db_result(tenant_id, os_status=res)
return ret | def delete_os_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Deletes the Openstack In network and update the DB. | 3.283157 | 2.95812 | 1.109879 |
ret = True
tenant_name = fw_dict.get('tenant_name')
try:
ret = self._delete_os_nwk(tenant_id, tenant_name, "out",
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Deletion of Out Openstack Network failed tenant "
"%(tenant)s, Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
ret = False
# Updating the FW DB
if ret:
res = fw_const.OS_OUT_NETWORK_DEL_SUCCESS
else:
res = fw_const.OS_OUT_NETWORK_DEL_FAIL
self.update_fw_db_result(tenant_id, os_status=res)
return ret | def delete_os_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Deletes the Openstack Out network and update the DB. | 3.193747 | 2.924394 | 1.092106 |
res = fw_const.OS_DUMMY_RTR_CREATE_SUCCESS
tenant_name = fw_dict.get('tenant_name')
try:
rtr_id = fw_dict.get('router_id')
if rtr_id is None:
LOG.error("Invalid router id, attaching dummy interface"
" failed")
return False
if is_fw_virt:
net_id = subnet_id = None
else:
net_id, subnet_id = (
self._attach_dummy_intf_rtr(tenant_id, tenant_name,
rtr_id))
if net_id is None or subnet_id is None:
LOG.error("Invalid net_id or subnet_id, creating dummy"
" interface failed")
return False
except Exception as exc:
# Function _attach_dummy_intf_rtr already took care of
# cleanup for error cases.
LOG.error("Creation of Openstack Router failed "
"tenant %(tenant)s, Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.OS_DUMMY_RTR_CREATE_FAIL
self.store_fw_db_router(tenant_id, net_id, subnet_id, rtr_id, res)
return True | def create_os_dummy_rtr(self, tenant_id, fw_dict, is_fw_virt=False) | Create the dummy interface and attach it to router.
Attach the dummy interface to the Openstack router and store the
info in DB. | 3.107093 | 2.970562 | 1.045961 |
ret = True
tenant_name = fw_dict.get('tenant_name')
try:
rtr_id = fw_dict.get('router_id')
if not rtr_id:
LOG.error("Invalid router id, deleting dummy interface"
" failed")
return False
if not is_fw_virt:
ret = self._delete_dummy_intf_rtr(tenant_id, tenant_name,
rtr_id)
except Exception as exc:
# Function _attach_dummy_intf_rtr already took care of
# cleanup for error cases.
LOG.error("Deletion of Openstack Router failed tenant "
"%(tenant)s, Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
ret = False
if ret:
res = fw_const.OS_DUMMY_RTR_DEL_SUCCESS
else:
res = fw_const.OS_DUMMY_RTR_DEL_FAIL
self.update_fw_db_result(tenant_id, os_status=res)
return ret | def delete_os_dummy_rtr(self, tenant_id, fw_dict, is_fw_virt=False) | Delete the Openstack Dummy router and store the info in DB. | 3.501328 | 3.409636 | 1.026892 |
tenant_name = fw_dict.get('tenant_name')
ret = self._create_service_nwk(tenant_id, tenant_name, 'in')
if ret:
res = fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS
LOG.info("In Service network created for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_IN_NETWORK_CREATE_FAIL
LOG.info("In Service network create failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | def create_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Create the DCNM In Network and store the result in DB. | 2.94957 | 2.77434 | 1.063161 |
tenant_name = fw_dict.get('tenant_name')
ret = self._delete_service_nwk(tenant_id, tenant_name, 'in')
if ret:
res = fw_const.DCNM_IN_NETWORK_DEL_SUCCESS
LOG.info("In Service network deleted for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_IN_NETWORK_DEL_FAIL
LOG.info("In Service network deleted failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | def delete_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Delete the DCNM In Network and store the result in DB. | 2.997145 | 2.813485 | 1.065278 |
res = fw_const.DCNM_IN_PART_UPDATE_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
self._update_partition_in_create(tenant_id, tenant_name)
except Exception as exc:
LOG.error("Update of In Partition failed for tenant %(tenant)s"
" Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_IN_PART_UPDATE_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("In partition updated with service ip addr")
return ret | def update_dcnm_in_part(self, tenant_id, fw_dict, is_fw_virt=False) | Update DCNM's in partition information.
Update the In partition service node IP address in DCNM and
update the result | 3.636791 | 3.475901 | 1.046287 |
res = fw_const.DCNM_IN_PART_UPDDEL_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
self._update_partition_in_delete(tenant_name)
except Exception as exc:
LOG.error("Clear of In Partition failed for tenant %(tenant)s"
" , Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_IN_PART_UPDDEL_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("In partition cleared off service ip addr")
return ret | def clear_dcnm_in_part(self, tenant_id, fw_dict, is_fw_virt=False) | Clear the DCNM in partition service information.
Clear the In partition service node IP address in DCNM and update the
result. | 4.473464 | 4.156216 | 1.076331 |
res = fw_const.DCNM_OUT_PART_CREATE_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
self._create_out_partition(tenant_id, tenant_name)
except Exception as exc:
LOG.error("Create of Out Partition failed for tenant "
"%(tenant)s ,Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_OUT_PART_CREATE_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("Out partition created")
return ret | def create_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False) | Create the DCNM OUT partition and update the result. | 2.849386 | 2.782669 | 1.023976 |
res = fw_const.DCNM_OUT_PART_DEL_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
self._delete_partition(tenant_id, tenant_name)
except Exception as exc:
LOG.error("deletion of Out Partition failed for tenant "
"%(tenant)s, Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_OUT_PART_DEL_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("Out partition deleted")
return ret | def delete_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False) | Delete the DCNM OUT partition and update the result. | 2.937738 | 2.811763 | 1.044803 |
tenant_name = fw_dict.get('tenant_name')
ret = self._create_service_nwk(tenant_id, tenant_name, 'out')
if ret:
res = fw_const.DCNM_OUT_NETWORK_CREATE_SUCCESS
LOG.info("out Service network created for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_OUT_NETWORK_CREATE_FAIL
LOG.info("out Service network create failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | def create_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Create the DCNM OUT Network and update the result. | 3.002852 | 2.799345 | 1.072698 |
tenant_name = fw_dict.get('tenant_name')
ret = self._delete_service_nwk(tenant_id, tenant_name, 'out')
if ret:
res = fw_const.DCNM_OUT_NETWORK_DEL_SUCCESS
LOG.info("out Service network deleted for tenant %s",
tenant_id)
else:
res = fw_const.DCNM_OUT_NETWORK_DEL_FAIL
LOG.info("out Service network deleted failed for tenant %s",
tenant_id)
self.update_fw_db_result(tenant_id, dcnm_status=res)
return ret | def delete_dcnm_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False) | Delete the DCNM OUT network and update the result. | 2.969695 | 2.79274 | 1.063362 |
res = fw_const.DCNM_OUT_PART_UPDATE_SUCCESS
tenant_name = fw_dict.get('tenant_name')
ret = True
try:
ret = self._update_partition_out_create(tenant_id, tenant_name)
if not ret:
res = fw_const.DCNM_OUT_PART_UPDATE_FAIL
except Exception as exc:
LOG.error("Update of Out Partition failed for tenant "
"%(tenant)s Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
res = fw_const.DCNM_OUT_PART_UPDATE_FAIL
ret = False
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("Out partition updated with service ip addr")
return ret | def update_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False) | Update DCNM OUT partition service node IP address and result. | 3.199207 | 3.061453 | 1.044996 |
res = fw_const.DCNM_OUT_PART_UPDDEL_SUCCESS
self.update_fw_db_result(tenant_id, dcnm_status=res)
LOG.info("Out partition cleared -noop- with service ip addr")
return True | def clear_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False) | Clear DCNM out partition information.
Clear the DCNM OUT partition service node IP address and update
the result | 14.740334 | 12.391452 | 1.189557 |
if ret:
if state == fw_const.FABRIC_PREPARE_DONE_STATE:
return state
else:
return state + 1
else:
return state | def get_next_create_state(self, state, ret) | Return the next create state from previous state. | 5.694981 | 5.238773 | 1.087083 |
if ret:
if state == fw_const.INIT_STATE:
return state
else:
return state - 1
else:
return state | def get_next_del_state(self, state, ret) | Return the next delete state from previous state. | 5.390104 | 4.685828 | 1.150299 |
if oper == fw_const.FW_CR_OP:
return self.get_next_create_state(state, ret)
else:
return self.get_next_del_state(state, ret) | def get_next_state(self, state, ret, oper) | Returns the next state for a create or delete operation. | 4.530061 | 2.649558 | 1.709742 |
ret = True
serv_obj = self.get_service_obj(tenant_id)
state = serv_obj.get_state()
# Preserve the ordering of the next lines till while
new_state = serv_obj.fixup_state(fw_const.FW_CR_OP, state)
serv_obj.store_local_final_result(fw_const.RESULT_FW_CREATE_INIT)
if state != new_state:
state = new_state
serv_obj.store_state(state)
while ret:
try:
ret = self.fabric_fsm[state][0](tenant_id, fw_dict,
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Exception %(exc)s for state %(state)s",
{'exc': str(exc), 'state':
fw_const.fw_state_fn_dict.get(state)})
ret = False
if ret:
LOG.info("State %s return successfully",
fw_const.fw_state_fn_dict.get(state))
state = self.get_next_state(state, ret, fw_const.FW_CR_OP)
serv_obj.store_state(state)
if state == fw_const.FABRIC_PREPARE_DONE_STATE:
break
if ret:
serv_obj.store_local_final_result(fw_const.RESULT_FW_CREATE_DONE)
return ret | def run_create_sm(self, tenant_id, fw_dict, is_fw_virt) | Runs the create State Machine.
Goes through every state function until the end or when one state
returns failure. | 3.540843 | 3.42068 | 1.035128 |
# Read the current state from the DB
ret = True
serv_obj = self.get_service_obj(tenant_id)
state = serv_obj.get_state()
# Preserve the ordering of the next lines till while
new_state = serv_obj.fixup_state(fw_const.FW_DEL_OP, state)
serv_obj.store_local_final_result(fw_const.RESULT_FW_DELETE_INIT)
if state != new_state:
state = new_state
serv_obj.store_state(state)
while ret:
try:
ret = self.fabric_fsm[state][1](tenant_id, fw_dict,
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Exception %(exc)s for state %(state)s",
{'exc': str(exc), 'state':
fw_const.fw_state_fn_del_dict.get(state)})
ret = False
if ret:
LOG.info("State %s return successfully",
fw_const.fw_state_fn_del_dict.get(state))
if state == fw_const.INIT_STATE:
break
state = self.get_next_state(state, ret, fw_const.FW_DEL_OP)
serv_obj.store_state(state)
return ret | def run_delete_sm(self, tenant_id, fw_dict, is_fw_virt) | Runs the delete State Machine.
Goes through every state function until the end or when one state
returns failure. | 3.930246 | 3.826124 | 1.027213 |
for key, val in state_dict.items():
if val == status:
return key | def get_key_state(self, status, state_dict) | Returns the key associated with the dict. | 3.880008 | 2.563239 | 1.513713 |
res_list = compl_result.split('(')
state_num = None
if len(res_list) > 1:
state_num = int(res_list[1].split(')')[0])
else:
if res_list[0] == fw_const.RESULT_FW_CREATE_INIT:
if os_status is None:
state_num = fw_const.INIT_STATE
elif res_list[0] == fw_const.RESULT_FW_CREATE_DONE:
state_num = fw_const.FABRIC_PREPARE_DONE_STATE
elif res_list[0] == fw_const.RESULT_FW_DELETE_INIT:
if os_status == fw_const.OS_CREATE_SUCCESS and (
dcnm_status == fw_const.DCNM_CREATE_SUCCESS):
state_num = fw_const.FABRIC_PREPARE_DONE_STATE
return state_num | def pop_fw_state(self, compl_result, os_status, dcnm_status) | Populate the state information in the cache.
Check if state information is embedded in result
If not:
a. It's still in Init state and no SM is called yet
b. The SM has completely run
c. Delete has started and before any SM is run, it restarted. | 2.422746 | 2.454076 | 0.987234 |
net = self.get_network(net_id)
serv_obj = self.get_service_obj(tenant_id)
serv_obj.update_fw_local_cache(net_id, direc, node_ip)
if net is not None:
net_dict = self.fill_dcnm_net_info(tenant_id, direc, net.vlan,
net.segmentation_id)
serv_obj.store_dcnm_net_dict(net_dict, direc)
if direc == "in":
subnet = self.service_in_ip.get_subnet_by_netid(net_id)
else:
subnet = self.service_out_ip.get_subnet_by_netid(net_id)
if subnet is not None:
subnet_dict = self.fill_dcnm_subnet_info(
tenant_id, subnet,
self.get_start_ip(subnet), self.get_end_ip(subnet),
self.get_gateway(subnet), self.get_secondary_gateway(subnet),
direc)
serv_obj.store_dcnm_subnet_dict(subnet_dict, direc) | def pop_fw_local(self, tenant_id, net_id, direc, node_ip) | Populate the local cache.
Read the Network DB and populate the local cache.
Read the subnet from the Subnet DB, given the net_id and populate the
cache. | 2.530107 | 2.465214 | 1.026323 |
tenant_id = fw_data.get('tenant_id')
self.create_serv_obj(tenant_id)
serv_obj = self.get_service_obj(tenant_id)
serv_obj.create_fw_db(fw_id, fw_data.get('name'), tenant_id)
self.pop_fw_local(tenant_id, fw_data.get('in_network_id'), "in",
fw_data.get('in_service_node_ip'))
self.pop_fw_local(tenant_id, fw_data.get('out_network_id'), "out",
fw_data.get('out_service_node_ip'))
serv_obj.update_fw_local_result_str(fw_data.get('os_status'),
fw_data.get('dcnm_status'),
fw_data.get('device_status'))
compl_res = fw_data.get('result')
result = compl_res.split('(')[0]
serv_obj.store_local_final_result(result)
state = self.pop_fw_state(compl_res, fw_data.get('os_status'),
fw_data.get('dcnm_status'))
if state is None:
LOG.error("Unable to get state complete result %(res)s"
" OS status %(os)s, dcnm status %(dcnm)s",
{'res': compl_res, 'os': fw_data.get('os_status'),
'dcnm': fw_data.get('dcnm_status')})
serv_obj.store_state(state, popl_db=False)
if state == fw_const.FABRIC_PREPARE_DONE_STATE:
serv_obj.set_fabric_create(True)
router_id = fw_data.get('router_id')
rout_net_id = fw_data.get('router_net_id')
rout_subnet_id = fw_data.get('router_subnet_id')
# Result is already populated above, so pass None below.
# And, the result passed should be a string
serv_obj.update_fw_local_router(rout_net_id, rout_subnet_id, router_id,
None) | def populate_local_cache_tenant(self, fw_id, fw_data) | Populate the cache for a given tenant.
Calls routines to Populate the in and out information.
Update the result information.
Populate the state information.
Populate the router information. | 3.079371 | 2.955554 | 1.041893 |
fw_dict = self.get_all_fw_db()
for fw_id in fw_dict:
LOG.info("Populating cache for FW %s", fw_id)
fw_data = fw_dict[fw_id]
self.populate_local_cache_tenant(fw_id, fw_data) | def populate_local_cache(self) | Populate the local cache from DB.
Read the entries from FW DB and calls routines to populate the cache. | 3.498012 | 3.083251 | 1.134521 |
subnet_lst = set()
subnet_lst.add(subnet_id)
ret = self.os_helper.delete_intf_router(None, None, rtr_id, subnet_lst)
if not ret:
return ret
return self.os_helper.delete_network_all_subnets(net_id) | def delete_os_dummy_rtr_nwk(self, rtr_id, net_id, subnet_id) | Delete the dummy interface to the router. | 4.004504 | 3.930619 | 1.018797 |
if seg is not None:
self.service_segs.release_segmentation_id(seg)
if vlan is not None:
self.service_vlans.release_segmentation_id(vlan)
self.os_helper.delete_network_all_subnets(net_id)
# There's a chance that OS network got created but it's ID
# was not put in DB
# So, deleting networks in os that has part of the special
# name
self.os_helper.delete_network_subname(fw_const.IN_SERVICE_NWK)
self.delete_network_db(net_id)
self.clear_fw_entry_by_netid(net_id)
self.service_in_ip.release_subnet_by_netid(net_id)
self.service_out_ip.release_subnet_by_netid(net_id) | def delete_os_nwk_db(self, net_id, seg, vlan) | Delete the Openstack Network from the database.
Release the segmentation ID, VLAN associated with the net.
Delete the network given the partial name.
Delete the entry from Network DB, given the net ID.
Delete the entry from Firewall DB, given the net ID.
Release the IN/OUT subnets associated with the net. | 5.379809 | 4.831573 | 1.11347 |
if not self.auto_nwk_create:
LOG.info("Auto network creation disabled")
return False
try:
tenant_name = fw_dict.get('tenant_name')
fw_id = fw_dict.get('fw_id')
fw_name = fw_dict.get('fw_name')
# TODO(padkrish) More than 1 FW per tenant not supported.
if tenant_id in self.service_attr and (
result == fw_const.RESULT_FW_CREATE_DONE):
LOG.error("Fabric already prepared for tenant %(tenant)s,"
" %(name)s",
{'tenant': tenant_id, 'name': tenant_name})
return True
if tenant_id not in self.service_attr:
self.create_serv_obj(tenant_id)
self.service_attr[tenant_id].create_fw_db(fw_id, fw_name,
tenant_id)
ret = self.run_create_sm(tenant_id, fw_dict, is_fw_virt)
if ret:
LOG.info("SM create returned True for Tenant Name "
"%(tenant)s FW %(fw)s",
{'tenant': tenant_name, 'fw': fw_name})
self.service_attr[tenant_id].set_fabric_create(True)
else:
LOG.error("SM create returned False for Tenant Name "
"%(tenant)s FW %(fw)s",
{'tenant': tenant_name, 'fw': fw_name})
except Exception as exc:
LOG.error("Exception raised in create fabric int %s",
str(exc))
return False
return ret | def _prepare_fabric_fw_internal(self, tenant_id, fw_dict, is_fw_virt,
result) | Internal routine to prepare the fabric.
This creates an entry in FW DB and runs the SM. | 3.080527 | 2.917144 | 1.056008 |
try:
with self.mutex_lock:
ret = self._prepare_fabric_fw_internal(tenant_id, fw_dict,
is_fw_virt, result)
except Exception as exc:
LOG.error("Exception raised in create fabric %s", str(exc))
return False
return ret | def prepare_fabric_fw(self, tenant_id, fw_dict, is_fw_virt, result) | Top level routine to prepare the fabric. | 3.804219 | 3.798104 | 1.00161 |
if not self.auto_nwk_create:
LOG.info("Auto network creation disabled")
return False
try:
tenant_name = fw_dict.get('tenant_name')
fw_name = fw_dict.get('fw_name')
if tenant_id not in self.service_attr:
LOG.error("Service obj not created for tenant %s",
tenant_name)
return False
# A check for is_fabric_create is not needed since a delete
# may be issued even when create is not completely done.
# For example, some state such as create stuff in DCNM failed and
# SM for create is in the process of retrying. A delete can be
# issue at that time. If we have a check for is_fabric_create
# then delete operation will not go through.
if result == fw_const.RESULT_FW_DELETE_DONE:
LOG.error("Fabric for tenant %s already deleted",
tenant_id)
return True
ret = self.run_delete_sm(tenant_id, fw_dict, is_fw_virt)
self.service_attr[tenant_id].set_fabric_create(False)
if ret:
LOG.info("Delete SM completed successfully for tenant"
"%(tenant)s FW %(fw)s",
{'tenant': tenant_name, 'fw': fw_name})
self.service_attr[tenant_id].destroy_local_fw_db()
self.delete_serv_obj(tenant_id)
else:
LOG.error("Delete SM failed for tenant"
"%(tenant)s FW %(fw)s",
{'tenant': tenant_name, 'fw': fw_name})
# TODO(padkrish) Equivalent of create_fw_db for delete.
except Exception as exc:
LOG.error("Exception raised in delete fabric int %s",
str(exc))
return False
return ret | def delete_fabric_fw_internal(self, tenant_id, fw_dict, is_fw_virt,
result) | Internal routine to delete the fabric configuration.
This runs the SM and deletes the entries from DB and local cache. | 4.496861 | 4.382889 | 1.026004 |
try:
with self.mutex_lock:
ret = self.delete_fabric_fw_internal(tenant_id, fw_dict,
is_fw_virt, result)
except Exception as exc:
LOG.error("Exception raised in delete fabric %s", str(exc))
return False
return ret | def delete_fabric_fw(self, tenant_id, fw_dict, is_fw_virt, result) | Top level routine to unconfigure the fabric. | 3.368091 | 3.312189 | 1.016878 |
if not self.auto_nwk_create:
LOG.info("Auto network creation disabled")
return False
try:
# TODO(padkrish) More than 1 FW per tenant not supported
if tenant_id not in self.service_attr:
LOG.error("Tenant Obj not created")
return False
if result == fw_const.RESULT_FW_CREATE_INIT:
# A check for is_fabric_create is not done here.
ret = self.run_create_sm(tenant_id, fw_data, is_fw_virt)
else:
if result == fw_const.RESULT_FW_DELETE_INIT:
# A check for is_fabric_create is not done here.
# Pls check the comment given in function
# delete_fabric_fw_int
ret = self.run_delete_sm(tenant_id, fw_data, is_fw_virt)
else:
LOG.error("Unknown state in retry")
return False
self.service_attr[tenant_id].set_fabric_create(ret)
except Exception as exc:
LOG.error("Exception raised in create fabric int %s",
str(exc))
return False
return ret | def retry_failure_internal(self, tenant_id, tenant_name, fw_data,
is_fw_virt, result) | Internal routine to retry the failed cases. | 4.408823 | 4.405517 | 1.00075 |
try:
with self.mutex_lock:
ret = self.retry_failure_internal(tenant_id, tenant_name,
fw_data, is_fw_virt, result)
except Exception as exc:
LOG.error("Exception raised in create fabric %s", str(exc))
return False
return ret | def retry_failure(self, tenant_id, tenant_name, fw_data, is_fw_virt,
result) | Top level retry failure routine. | 3.950067 | 3.746532 | 1.054326 |
conf_dict = {}
for uuid, val in cfg.CONF.get(prefix, {}).items():
conf_dict[uuid] = dict(val)
return conf_dict | def get_specific_config(prefix) | Retrieve config based on the format [<prefix>:<value>].
returns: a dict, {<UUID>: {<key1>:<value1>, <key2>:<value2>, ...}} | 4.845345 | 4.428593 | 1.094105 |
if ((bc.NEUTRON_VERSION >= bc.NEUTRON_NEWTON_VERSION) and 'tenant_id'
in res_dict):
res_dict['project_id'] = res_dict['tenant_id']
if is_create: # POST
for attr, attr_vals in six.iteritems(attr_info):
if attr_vals['allow_post']:
if 'default' not in attr_vals and attr not in res_dict:
msg = _("Failed to parse request. Required attribute '%s' "
"not specified") % attr
raise webob.exc.HTTPBadRequest(msg)
res_dict[attr] = res_dict.get(attr, attr_vals.get('default'))
else:
if attr in res_dict:
msg = _("Attribute '%s' not allowed in POST") % attr
raise webob.exc.HTTPBadRequest(msg)
else: # PUT
for attr, attr_vals in six.iteritems(attr_info):
if attr in res_dict and not attr_vals['allow_put']:
msg = _("Cannot update read-only attribute %s") % attr
raise webob.exc.HTTPBadRequest(msg)
for attr, attr_vals in six.iteritems(attr_info):
if (attr not in res_dict or
res_dict[attr] is bc.constants.ATTR_NOT_SPECIFIED):
continue
# Convert values if necessary
if 'convert_to' in attr_vals:
res_dict[attr] = attr_vals['convert_to'](res_dict[attr])
# Check that configured values are correct
if 'validate' not in attr_vals:
continue
for rule in attr_vals['validate']:
_ensure_format(rule, attr, res_dict)
res = bc.validators[rule](res_dict[attr],
attr_vals['validate'][rule])
if res:
msg_dict = dict(attr=attr, reason=res)
msg = (_("Invalid input for %(attr)s. Reason: %(reason)s.") %
msg_dict)
raise webob.exc.HTTPBadRequest(msg)
return res_dict | def verify_resource_dict(res_dict, is_create, attr_info) | Verifies required attributes are in resource dictionary, res_dict.
Also checking that an attribute is only specified if it is allowed
for the given operation (create/update).
Attribute with default values are considered to be optional.
This function contains code taken from function 'prepare_request_body' in
attributes.py. | 2.301891 | 2.282218 | 1.00862 |
if uuidutils.is_uuid_like(val):
return val
else:
try:
int_val = int(val, 16)
except ValueError:
with excutils.save_and_reraise_exception():
LOG.error("Invalid UUID format %s. Please provide an "
"integer in decimal (0-9) or hex (0-9a-e) "
"format", val)
res = str(int_val)
num = 12 - len(res)
return "00000000-0000-0000-0000-" + "0" * num + res | def uuidify(val) | Takes an integer and transforms it to a UUID format.
returns: UUID formatted version of input. | 2.968313 | 3.060082 | 0.970011 |
if rule == 'type:uuid' or (rule == 'type:uuid_or_none' and
res_dict[attribute]):
res_dict[attribute] = uuidify(res_dict[attribute])
elif rule == 'type:uuid_list':
if not res_dict[attribute]:
res_dict[attribute] = []
else:
temp_list = res_dict[attribute].split(':')
res_dict[attribute] = []
for item in temp_list:
res_dict[attribute].append = uuidify(item)
elif rule == 'type:string_or_none' and res_dict[attribute] == "":
res_dict[attribute] = None | def _ensure_format(rule, attribute, res_dict) | Verifies that attribute in res_dict is properly formatted.
Since, in the .ini-files, lists are specified as ':' separated text and
UUID values can be plain integers we need to transform any such values
into proper format. Empty strings are converted to None if validator
specifies that None value is accepted. | 2.482276 | 2.377728 | 1.04397 |
cred_dict = get_specific_config('cisco_hosting_device_credential')
attr_info = {
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None}, 'is_visible': True,
'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'user_name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'password': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'type': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None}, 'is_visible': True,
'default': ''}}
credentials = {}
for cred_uuid, kv_dict in cred_dict.items():
# ensure cred_uuid is properly formatted
cred_uuid = uuidify(cred_uuid)
verify_resource_dict(kv_dict, True, attr_info)
credentials[cred_uuid] = kv_dict
return credentials | def obtain_hosting_device_credentials_from_config() | Obtains credentials from config file and stores them in memory.
To be called before hosting device templates defined in the config file
are created. | 2.028758 | 2.042733 | 0.993159 |
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
bc.constants.L3) | def get_resources(cls) | Returns Ext Resources. | 4.775208 | 4.443161 | 1.074732 |
pass | def get_routertypes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False) | Lists defined router types. | 5,088.733398 | 2,956.161133 | 1.721399 |
set_mysql_engine()
engine = session.create_engine(neutron_config.database.connection)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,
version_table=alembic_migrations.VERSION_TABLE
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
engine.dispose() | def run_migrations_online() | Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context. | 3.108501 | 3.109146 | 0.999793 |
'''
Check whether the Launch Paramters set the role.
'''
return self.roles and any([re.search(role, our_role, re.I)
for our_role in self.roles]) | def has_role(self, role) | Check whether the Launch Paramters set the role. | 12.465363 | 4.388746 | 2.840302 |
'''
Return the full, given, or family name if set.
'''
if self.lis_person_name_given:
return self.lis_person_name_given
elif self.lis_person_name_family:
return self.lis_person_name_family
elif self.lis_person_name_full:
return self.lis_person_name_full
else:
return default | def username(self, default=None) | Return the full, given, or family name if set. | 2.735395 | 1.763833 | 1.550824 |
'''
POSTs the given score to the Tool Consumer with a replaceResult.
Returns OutcomeResponse object and stores it in self.outcome_request
OPTIONAL:
result_data must be a dictionary
Note: ONLY ONE of these values can be in the dict at a time,
due to the Canvas specification.
'text' : str text
'url' : str url
'''
return self.new_request(outcome_opts).post_replace_result(score, result_data) | def post_replace_result(self, score, outcome_opts=defaultdict(lambda:None), result_data=None) | POSTs the given score to the Tool Consumer with a replaceResult.
Returns OutcomeResponse object and stores it in self.outcome_request
OPTIONAL:
result_data must be a dictionary
Note: ONLY ONE of these values can be in the dict at a time,
due to the Canvas specification.
'text' : str text
'url' : str url | 14.501911 | 1.71535 | 8.454199 |
'''
If the Tool Consumer sent a return URL, add any set messages to the
URL.
'''
if not self.launch_presentation_return_url:
return None
lti_message_fields = ['lti_errormsg', 'lti_errorlog',
'lti_msg', 'lti_log']
messages = dict([(key, getattr(self, key))
for key in lti_message_fields
if getattr(self, key, None)])
# Disassemble original return URL and reassemble with our options added
original = urlsplit(self.launch_presentation_return_url)
combined = messages.copy()
combined.update(dict(parse_qsl(original.query)))
combined_query = urlencode(combined)
return urlunsplit((
original.scheme,
original.netloc,
original.path,
combined_query,
original.fragment
)) | def build_return_url(self) | If the Tool Consumer sent a return URL, add any set messages to the
URL. | 4.537421 | 3.21747 | 1.410245 |
'''
Shortcut for redirecting Django view to LTI Consumer with messages
'''
from django.shortcuts import redirect
self.lti_msg = msg
self.lti_log = log
return redirect(self.build_return_url()) | def success_redirect(self, msg='', log='') | Shortcut for redirecting Django view to LTI Consumer with messages | 8.046177 | 3.288554 | 2.446722 |
'''
Shortcut for redirecting Django view to LTI Consumer with errors
'''
from django.shortcuts import redirect
self.lti_errormsg = errormsg
self.lti_errorlog = errorlog
return redirect(self.build_return_url()) | def error_redirect(self, errormsg='', errorlog='') | Shortcut for redirecting Django view to LTI Consumer with errors | 6.570429 | 2.96547 | 2.215645 |
# Move the Thing's entry in the private lookup table to its new lid.
# The table object acts as its own lock (used as a context manager),
# and pop+assign under that lock keeps the rename atomic.
try:
    with self.__private_things:
        self.__private_things[to_lid] = self.__private_things.pop(from_lid)
except KeyError:
    logger.warning('Thing %s renamed (to %s), but not in private lookup table', from_lid, to_lid)
else:
    # renaming could happen before get_thing is called on the original
    # lid, so mirror the rename in the pending "new things" table too.
    # Absence there is normal and silently ignored.
    try:
        with self.__new_things:
            self.__new_things[to_lid] = self.__new_things.pop(from_lid)
    except KeyError:
        pass
if callback_parsed:
callback = self._get_parsed_feed_callback(callback_parsed, callback)
return self.__client.register_callback_feeddata(callback) | def register_catchall_feeddata(self, callback, callback_parsed=None) | Registers a callback that is called for all feeddata your Thing receives
`Example`
#!python
def feeddata_callback(data):
print(data)
...
client.register_catchall_feeddata(feeddata_callback)
`callback` (required) the function name that you want to be called on receipt of new feed data
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of feed data. This is
equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both
`callback_parsed` and `callback` have been specified, the former takes precedence and `callback` is only called
if the point data could not be parsed according to its current value description.
`NOTE`: `callback_parsed` can only be used if `auto_encode_decode` is enabled for the client instance.
More details on the contents of the `data` dictionary for feeds see:
[follow()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.follow) | 4.565852 | 7.466394 | 0.61152 |
if callback_parsed:
callback = self._get_parsed_control_callback(callback_parsed, callback)
return self.__client.register_callback_controlreq(callback) | def register_catchall_controlreq(self, callback, callback_parsed=None) | Registers a callback that is called for all control requests received by your Thing
`Example`
#!python
def controlreq_callback(data):
print(data)
...
client.register_catchall_controlreq(controlreq_callback)
`callback` (required) the function name that you want to be called on receipt of a new control request
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of a control ask/tell.
This is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both `callback_parsed` and
`callback` have been specified, the former takes precedence and `callback` is only called if the point data
could not be parsed according to its current value description.
More details on the contents of the `data` dictionary for controls see:
[create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control) | 4.497148 | 6.529525 | 0.68874 |
# Strip the wrapper so the user's callback only sees the payload dict.
payload_cb = partial(self.__callback_payload_only, func)
self.__client.register_callback_created(payload_cb, serialised=serialised)
is created within your agent. If `serialised` is not set, the callbacks might arrive
in a different order to they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource created
lid : <name> # the local name of the resource
id : <GUID> # the global Id of the resource
epId : <GUID> # the global Id of your agent
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def created_callback(args):
print(args)
...
client.register_callback_created(created_callback)
This would print out something like the following on creation of an R_ENTITY
#!python
OrderedDict([(u'lid', u'new_thing1'), (u'r', 1),
(u'epId', u'ffd47b75ea786f55c76e337cdc47665a'),
(u'id', u'3f11df0a09588a6a1a9732e3837765f8')])) | 7.85663 | 11.351714 | 0.69211 |
# Strip the wrapper so the user's callback only sees the payload dict.
payload_cb = partial(self.__callback_payload_only, func)
self.__client.register_callback_duplicate(payload_cb, serialised=serialised)
In this case the existing reference is passed to you.
If `serialised` is not set, the callbacks might arrive in a different order to they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of existing resource
lid : <name> # the local name of the
# existing resource
id : <GUID> # the global Id of the
# existing resource
epId : <GUID> # the global Id of your agent
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def duplicated_callback(args):
print(args)
...
client.register_callback_created(duplicated_callback)
This would print out something like the following on re-creation of an R_ENTITY
#!python
OrderedDict([(u'lid', u'new_thing1'), (u'r', 1),
(u'epId', u'ffd47b75ea786f55c76e337cdc47665a'),
(u'id', u'3f11df0a09588a6a1a9732e3837765f8')])) | 8.191312 | 12.615526 | 0.649304 |
# Strip the wrapper so the user's callback only sees the payload dict.
payload_cb = partial(self.__callback_payload_only, func)
self.__client.register_callback_renamed(payload_cb, serialised=serialised)
is renamed within your agent.
If `serialised` is not set, the callbacks might arrive in a different order to they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource deleted
lid : <name> # the new local name of the resource
oldLid : <name> # the old local name of the resource
id : <GUID> # the global Id of the resource
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def renamed_callback(args):
print(args)
...
client.register_callback_renamed(renamed_callback)
This would print out something like the following on renaming of an R_ENTITY
#!python
OrderedDict([(u'lid', u'new_name'),
(u'r', 1),
(u'oldLid', u'old_name'),
(u'id', u'4448993b44738411de5fe2a6cf32d957')]) | 7.886672 | 9.661842 | 0.81627 |
# Strip the wrapper so the user's callback only sees the payload dict.
payload_cb = partial(self.__callback_payload_only, func)
self.__client.register_callback_deleted(payload_cb, serialised=serialised)
is deleted within your agent.
If `serialised` is not set, the callbacks might arrive in a different order to they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource deleted
lid : <name> # the local name of the resource
id : <GUID> # the global Id of the resource
`Note` resource types are defined [here](../Core/Const.m.html)
`Example`
#!python
def deleted_callback(args):
print(args)
...
client.register_callback_deleted(deleted_callback)
This would print out something like the following on deletion of an R_ENTITY
#!python
OrderedDict([(u'lid', u'old_thing1'),
(u'r', 1),
(u'id', u'315637813d801ec6f057c67728bf00c2')]) | 8.507364 | 11.178221 | 0.761066 |
# Strip the wrapper so the user's callback only sees the payload dict.
# `serialised` passed by keyword for consistency with the sibling
# register_callback_* helpers (was positional here only).
self.__client.register_callback_reassigned(partial(self.__callback_payload_only, func), serialised=serialised)
is reassigned to or from your agent.
If `serialised` is not set, the callbacks might arrive in a different order to they were requested.
The payload passed to your callback is an OrderedDict with the following keys
#!python
r : R_ENTITY, R_FEED, etc # the type of resource reassigned
lid : <name> # the local name of the resource
epId : <GUID> # the global Id of the agent the
# resource has been reassigned *to*
id : <GUID> # the global Id of the resource
`Note` resource types are defined [here](../Core/Const.m.html)
`Note` You can check whether this is an assign "in" or "out" by comparing the epId with your current
agent id, using the `IOT.Client.agent_id` property. If it's the same it's a reassign to you.
`Example`
#!python
def reassigned_callback(args):
print(args)
...
client.register_callback_reassigned(reassigned_callback)
This would print out something like the following on assignment of an R_ENTITY to
#!python
OrderedDict([(u'lid', u'moved_thing'),
(u'r', 1),
(u'epId', u'5a8d603ee757133d66d99875d0584c72'),
(u'id', u'4448993b44738411de5fe2a6cf32d957')]) | 9.333194 | 12.703176 | 0.734713 |
return self.__client.register_callback_created(partial(self.__callback_subscribed_filter, callback),
serialised=False) | def register_callback_subscribed(self, callback) | Register a callback for new subscription. This gets called whenever one of *your* things subscribes to something
else.
`Note` it is not called when whenever something else subscribes to your thing.
The payload passed to your callback is either a
[RemoteControl](RemotePoint.m.html#IoticAgent.IOT.RemotePoint.RemoteControl) or
[RemoteFeed](RemotePoint.m.html#IoticAgent.IOT.RemotePoint.RemoteFeed) instance. | 17.007301 | 19.333 | 0.879703 |
# Re-deliver the given data to the locally registered feed callback,
# as if it had just arrived from the remote Thing.
client = self.__client
client.simulate_feeddata(feedid, data, mime, time)
Calls the registered callback for the feed with the last received feed data. Allows you to test your code
without having to wait for the remote thing to share again.
`feedid` (required) (string) local id of your Feed
`data` (optional) (as applicable) The data you want to use to simulate the arrival of remote feed data
`mime` (optional) (string) The mime type of your data. See also:
[share()](./Point.m.html#IoticAgent.IOT.Point.Feed.share)
`time` (optional) (datetime) UTC timestamp for share. See also:
[share()](./Point.m.html#IoticAgent.IOT.Point.Feed.share) | 4.116549 | 5.718212 | 0.719901 |
# Acknowledge a control "tell" back to the requestor, then block until
# the infrastructure confirms (raising on failure/timeout).
lid, pid = data[P_ENTITY_LID], data[P_LID]
logger.info('confirm_tell(success=%s) [lid="%s",pid="%s"]', success, lid, pid)
evt = self._request_point_confirm_tell(R_CONTROL, lid, pid, success, data['requestId'])
self._wait_and_except_if_failed(evt)
Used when you are advertising a control and you want to tell the remote requestor that you have
done what they asked you to.
`Example:` this is a minimal example to show the idea. Note - no Exception handling and ugly use of globals
#!python
client = None
def controlreq_cb(args):
global client # the client object you connected with
# perform your action with the data they sent
success = do_control_action(args['data'])
if args['confirm']: # you've been asked to confirm
client.confirm_tell(args, success)
# else, if you do not confirm_tell() this causes a timeout at the requestor's end.
client = IOT.Client(config='test.ini')
thing = client.create_thing('test321')
control = thing.create_control('control', controlreq_cb)
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`data` (mandatory) (dictionary) The `"args"` dictionary that your callback was called with
`success` (mandatory) (boolean) Whether or not the action you have been asked to do has been
sucessful.
More details on the contents of the `data` dictionary for controls see:
[create_control()](./Thing.m.html#IoticAgent.IOT.Thing.Thing.create_control) | 7.73266 | 8.712523 | 0.887534 |
# Persist volatile agent state (sequence number, default language)
# into the config before writing it out.
cfg = self.__config
cfg.set('agent', 'seqnum', self.__client.get_seqnum())
cfg.set('agent', 'lang', self.__client.default_lang)
cfg.save()
# Return (creating on demand) the PointDataObjectHandler for `point`.
# The handler cache acts as its own lock; EAFP lookup keeps the common
# hit path to one dict access, and setdefault guards the first-creation
# path so concurrent callers share a single handler instance.
with self.__point_data_handlers:
    try:
        return self.__point_data_handlers[point]
    except KeyError:
        return self.__point_data_handlers.setdefault(point, PointDataObjectHandler(point, self))
# Present point data to the user as a parsed object when possible,
# falling back to the plain callback when parsing is not possible.
# used by PointDataObjectHandler as reference
if foc == R_FEED:
    point_ref = data['pid']
else:  # R_CONTROL
    point_ref = Control(self, data[P_ENTITY_LID], data[P_LID], '0' * 32)
try:
    data['parsed'] = self._get_point_data_handler_for(point_ref).get_template(data=data[P_DATA])
except RefreshException:
    # No metadata available, do not produce warning
    if callback_plain:
        callback_plain(data)
except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
    logger.warning('Failed to parse %s data for %s%s', foc_to_str(foc), point_ref,
                   '' if callback_plain else ', ignoring',
                   exc_info=DEBUG_ENABLED)
    if callback_plain:
        callback_plain(data)
else:
    callback_parsed(data)
object. | 7.618971 | 7.185107 | 1.060384 |
# Wait for the event (falling back to the configured sync timeout when
# no explicit/zero timeout is given), then raise if it did not succeed.
wait_for = timeout or self.__sync_timeout
event.wait(wait_for)
self._except_if_failed(event)
sync_timeout is used. | 8.101561 | 3.415649 | 2.371895 |
if event.success is None:
raise IOTSyncTimeout('Requested timed out', event)
if not event.success:
msg = "Request failed, unknown error"
if isinstance(event.payload, Mapping):
if P_MESSAGE in event.payload:
msg = event.payload[P_MESSAGE]
try:
exc_class = cls.__exception_mapping[event.payload[P_CODE]]
except KeyError:
pass
else:
raise exc_class(msg, event)
raise IOTException(msg, event) | def _except_if_failed(cls, event) | Raises an IOTException from the given event if it was not successful. Assumes timeout success flag on event
has not been set yet. | 4.749716 | 3.899136 | 1.218146 |
logger.info("list(all_my_agents=%s, limit=%s, offset=%s)", all_my_agents, limit, offset)
if all_my_agents:
evt = self._request_entity_list_all(limit=limit, offset=offset)
else:
evt = self._request_entity_list(limit=limit, offset=offset)
self._wait_and_except_if_failed(evt)
return evt.payload['entities'] | def list(self, all_my_agents=False, limit=500, offset=0) | List `all` the things created by this client on this or all your agents
Returns QAPI list function payload
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`all_my_agents` (optional) (boolean) If `False` limit search to just this agent,
if `True` return list of things belonging to all agents you own.
`limit` (optional) (integer) Return this many Point details
`offset` (optional) (integer) Return Point details starting at this offset | 2.821154 | 3.098098 | 0.910608 |
with self.__new_things:
try:
return self.__new_things.pop(lid)
except KeyError as ex:
raise_from(KeyError('Thing %s not know as new' % lid), ex) | def get_thing(self, lid) | Get the details of a newly created Thing. This only applies to asynchronous creation of Things and the
new Thing instance can only be retrieved once.
Returns a [Thing](Thing.m.html#IoticAgent.IOT.Thing.Thing) object,
which corresponds to the Thing with the given local id (nickname)
Raises `KeyError` if the Thing has not been newly created (or has already been retrieved by a previous call)
`lid` (required) (string) local identifier of your Thing. | 5.78836 | 5.933437 | 0.975549 |
evt = self.create_thing_async(lid)
self._wait_and_except_if_failed(evt)
try:
with self.__new_things:
return self.__new_things.pop(lid)
except KeyError as ex:
raise raise_from(IOTClientError('Thing %s not in cache (post-create)' % lid), ex) | def create_thing(self, lid) | Create a new Thing with a local id (lid).
Returns a [Thing](Thing.m.html#IoticAgent.IOT.Thing.Thing) object if successful
or if the Thing already exists
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`lid` (required) (string) local identifier of your Thing. The local id is your name or nickname for the thing.
It's "local" in that it's only available to you on this container, not searchable and not visible to others. | 6.805871 | 7.583731 | 0.89743 |
# Delete the named Thing and block until the request completes.
logger.info('delete_thing(lid="%s")', lid)
evt = self.delete_thing_async(lid)
self._wait_and_except_if_failed(evt)
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`lid` (required) (string) local identifier of the Thing you want to delete | 5.886864 | 7.526738 | 0.782127 |
return self.search(text, lang, location, unit, limit, offset, reduced=True, local=local, scope=scope) | def search_reduced(self, text=None, lang=None, location=None, unit=None, limit=100, offset=0, local=None,
scope=SearchScope.PUBLIC) | Shorthand for [search()](#IoticAgent.IOT.Client.Client.search) with `reduced=True` | 3.203671 | 2.501198 | 1.280855 |
logger.info("search_located(text=\"%s\", lang=\"%s\", location=\"%s\", unit=\"%s\", limit=%s, offset=%s)",
text, lang, location, unit, limit, offset)
evt = self._request_search(text, lang, location, unit, limit, offset, SearchType.LOCATED, local, scope)
self._wait_and_except_if_failed(evt)
return evt.payload['result'] | def search_located(self, text=None, lang=None, location=None, unit=None, limit=100, offset=0, local=None,
scope=SearchScope.PUBLIC) | See [search()](#IoticAgent.IOT.Client.Client.search) for general documentation. Provides a thing-only
result set comprised only of things which have a location set, e.g.:
#!python
{
# Keyed by thing id
'2b2d8b068e404861b19f9e060877e002':
# location (g, lat & long), label (l, optional)
{'g': (52.4539, -1.74803), 'l': 'Weather Station #2'},
'76a3b24b02d34f20b675257624b0e001':
{'g': (52.244384, 0.716356), 'l': None},
'76a3b24b02d34f20b675257624b0e004':
{'g': (52.245384, 0.717356), 'l': 'Gasometer'},
'76a3b24b02d34f20b675257624b0e005':
{'g': (52.245384, 0.717356), 'l': 'Zepellin'}
} | 3.117546 | 3.221726 | 0.967663 |
if isinstance(guid_or_resource, self.__guid_resources):
guid = guid_or_resource.guid
elif isinstance(guid_or_resource, string_types):
guid = uuid_to_hex(guid_or_resource)
else:
raise ValueError("describe requires guid string or Thing, Point, RemoteFeed or RemoteControl instance")
logger.info('describe() [guid="%s"]', guid)
evt = self._request_describe(guid, lang, local, scope)
self._wait_and_except_if_failed(evt)
return evt.payload['result'] | def describe(self, guid_or_resource, lang=None, local=None, scope=DescribeScope.AUTO) | Describe returns the public (or local) description of a Thing or Point
Returns the description dict (see below for Thing example) if available, otherwise `None`
#!python
{
"type": "Entity",
"meta": {
"long": 0.716356,
"lat": 52.244384,
"label": "Weather Station #1",
"parent": "3bbf307b43b1460289fe707619dece3d",
"points": [
{
"type": "Control",
"label": "Control 101",
"guid": "fb1a4a4dbb2642ab9f836892da93c101",
"storesRecent": false
},
{
"type": "Feed",
"label": "My weather feed",
"guid": "fb1a4a4dbb2642ab9f836892da93f101",
"storesRecent": true
}
],
"comment": "A lovely weather station...",
"tags": [
"blue",
"garden"
]
}
}
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`guid_or_resource` (mandatory) (string or object).
If a `string`, it should contain the globally unique id of the resource you want to describe in 8-4-4-4-12
(or undashed) format.
If an `object`, it should be an instance of Thing, Point, RemoteFeed or RemoteControl. The system will return
you the description of that object.
`lang` (optional) (string) The two-character ISO 639-1 language code for which labels and comments will be
returned. This does not affect Values (i.e. when describing a Point, apart from value comments) and tags as
these are language neutral).
`local` (optional) (boolean) **Deprecated**, use `scope` instead. If `true`, lookup metadata at container level.
Check the local_meta flag to determine whether local metadata functionality is available. (Takes precedence over
`scope`.)
`scope` (optional) ([DescribeScope](../Core/Const.m.html#IoticAgent.Core.Const.DescribeScope)) Whether to
perform PUBLIC, LOCAL (container level) or LOCAL_OWN (container level restricted to own things) metadata lookup.
Check the [local_meta](#IoticAgent.IOT.Client.Client.local_meta) flag to determine whether local metadata
functionality is available. (Note that AUTO, PUBLIC and LOCAL_OWN scopes are always available.). AUTO mode
first attempts to look up private metadata, then public. | 5.945595 | 4.91374 | 1.209994 |
'''
Convenience method for creating a new OutcomeRequest from a request
object.
post_request is assumed to be a Django HttpRequest object
'''
request = OutcomeRequest()
request.post_request = post_request
request.process_xml(post_request.data)
return request | def from_post_request(post_request) | Convenience method for creating a new OutcomeRequest from a request
object.
post_request is assumed to be a Django HttpRequest object | 6.735453 | 3.212973 | 2.09633 |
'''
POSTs the given score to the Tool Consumer with a replaceResult.
OPTIONAL:
result_data must be a dictionary
Note: ONLY ONE of these values can be in the dict at a time,
due to the Canvas specification.
'text' : str text
'url' : str url
'''
self.operation = REPLACE_REQUEST
self.score = score
self.result_data = result_data
if result_data is not None:
if len(result_data) > 1:
error_msg = ('Dictionary result_data can only have one entry. '
'{0} entries were found.'.format(len(result_data)))
raise InvalidLTIConfigError(error_msg)
elif 'text' not in result_data and 'url' not in result_data:
error_msg = ('Dictionary result_data can only have the key '
'"text" or the key "url".')
raise InvalidLTIConfigError(error_msg)
else:
return self.post_outcome_request()
else:
return self.post_outcome_request() | def post_replace_result(self, score, result_data=None) | POSTs the given score to the Tool Consumer with a replaceResult.
OPTIONAL:
result_data must be a dictionary
Note: ONLY ONE of these values can be in the dict at a time,
due to the Canvas specification.
'text' : str text
'url' : str url | 4.388969 | 2.09374 | 2.096234 |
'''
POST an OAuth signed request to the Tool Consumer.
'''
if not self.has_required_attributes():
raise InvalidLTIConfigError(
'OutcomeRequest does not have all required attributes')
consumer = oauth2.Consumer(key=self.consumer_key,
secret=self.consumer_secret)
client = oauth2.Client(consumer)
# monkey_patch_headers ensures that Authorization
# header is NOT lower cased
monkey_patch_headers = True
monkey_patch_function = None
if monkey_patch_headers:
import httplib2
http = httplib2.Http
normalize = http._normalize_headers
def my_normalize(self, headers):
print("My Normalize", headers)
ret = normalize(self, headers)
if 'authorization' in ret:
ret['Authorization'] = ret.pop('authorization')
print("My Normalize", ret)
return ret
http._normalize_headers = my_normalize
monkey_patch_function = normalize
response, content = client.request(
self.lis_outcome_service_url,
'POST',
body=self.generate_request_xml(),
headers={'Content-Type': 'application/xml'})
if monkey_patch_headers and monkey_patch_function:
import httplib2
http = httplib2.Http
http._normalize_headers = monkey_patch_function
self.outcome_response = OutcomeResponse.from_post_response(response,
content)
return self.outcome_response | def post_outcome_request(self) | POST an OAuth signed request to the Tool Consumer. | 3.359681 | 2.903076 | 1.157283 |
'''
Parse Outcome Request data from an IMS POX XML payload.
Sets message_identifier, operation, lis_result_sourcedid and (for
replaceResult) score from whichever request type is present.
'''
root = objectify.fromstring(xml)
self.message_identifier = str(
    root.imsx_POXHeader.imsx_POXRequestHeaderInfo.
    imsx_messageIdentifier)
# Exactly one of the three request types should be present; objectify
# raises AttributeError for absent elements. The bare `except:` blocks
# were narrowed so real errors are no longer silently swallowed.
try:
    result = root.imsx_POXBody.replaceResultRequest
    self.operation = REPLACE_REQUEST
    # Get result sourced id from resultRecord
    self.lis_result_sourcedid = result.resultRecord.\
        sourcedGUID.sourcedId
    self.score = str(result.resultRecord.result.
                     resultScore.textString)
except AttributeError:
    pass
try:
    result = root.imsx_POXBody.deleteResultRequest
    self.operation = DELETE_REQUEST
    # Get result sourced id from resultRecord
    self.lis_result_sourcedid = result.resultRecord.\
        sourcedGUID.sourcedId
except AttributeError:
    pass
try:
    result = root.imsx_POXBody.readResultRequest
    self.operation = READ_REQUEST
    # Get result sourced id from resultRecord
    self.lis_result_sourcedid = result.resultRecord.\
        sourcedGUID.sourcedId
except AttributeError:
    pass
# Wrap the plain socket for TLS. Preference order: explicit kwargs via
# `sslopts`, then a pre-built SSLContext via `sslctx` (with SNI), else
# a default wrap.
# NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12);
# the SSLContext path is the modern one — confirm supported versions.
if hasattr(self, 'sslopts'):
    self.sock = ssl.wrap_socket(self.sock, **self.sslopts)
elif hasattr(self, 'sslctx'):
    self.sock = self.sslctx.wrap_socket(self.sock,
                                        server_hostname=self.hostname)
else:
    self.sock = ssl.wrap_socket(self.sock)
# Perform the handshake eagerly so TLS errors surface here rather than
# on the first read/write.
self.sock.do_handshake()
# Read via the SSL object directly for the fast path.
self._quick_recv = self.sock.read
if self.sock is not None:
try:
unwrap = self.sock.unwrap
except AttributeError:
return
try:
self.sock = unwrap()
except ValueError:
# Failure within SSL might mean unwrap exists but socket is not
# deemed wrapped
pass | def _shutdown_transport(self) | Unwrap a Python 2.6 SSL socket, so we can call shutdown() | 10.74348 | 8.369261 | 1.283683 |
if self.__fname is None:
f = os.path.splitext(os.path.basename(argv[0]))[0] + '.ini'
cwd = os.getcwd()
# todo: prefer script path or current path ??
# print(os.path.realpath(sys.argv[0]))
# todo: if os.path.exists(os.path.join(cwd, main.__file__)):
return os.path.join(cwd, f)
return self.__fname | def _file_loc(self) | _file_loc helper returns a possible config filename.
EG /tmp/stuff/fish.py -> /tmp/stuff/fish.ini | 4.937777 | 4.398776 | 1.122534 |
# Apply the configured log levels to the known noisy third-party loggers.
for module in ('amqp', 'rdflib'):
    logging.getLogger(module).setLevel(str_to_logging(self.get('logging', module)))
# Serialise the config dict and write it to the chosen file.
# An explicit filename overrides the one the config was loaded from.
target = self.__fname if filename is None else filename
if target is None:
    raise ValueError('Config loaded from string, no filename specified')
parser = dict_to_cp(self.__config)
with open(target, 'w') as fh:
    parser.write(fh)
val = val.lower()
if section in self.__config:
if val in self.__config[section]:
# logger.debug('get config %s %s = %s', section, val, self.__config[section][val])
return self.__config[section][val]
if section in self.__defaults:
if val in self.__defaults[section]:
# logger.debug('get defaults %s %s = %s', section, val, self.__defaults[section][val])
return self.__defaults[section][val]
return None | def get(self, section, val) | Get a setting or the default
`Returns` The current value of the setting `val` or the default, or `None` if not found
`section` (mandatory) (string) the section name in the config E.g. `"agent"`
`val` (mandatory) (string) the section name in the config E.g. `"host"` | 1.81186 | 1.971798 | 0.918887 |
# Store the value under a lower-cased key; unknown sections are ignored.
if section in self.__config:
    self.__config[section][val.lower()] = data
`section` (mandatory) (string) the section name in the config E.g. `"agent"`
`val` (mandatory) (string) the section name in the config E.g. `"host"`
`data` (mandatory) (as appropriate) the new value for the `val` | 3.595572 | 4.930175 | 0.729299 |
# Only write when the new value is set and actually differs from the
# current (or default) one — keeps save() from persisting defaults.
current = self.get(section, val)
if data is not None and current != data:
    self.set(section, val, data)
This saves the .save writing the defaults
`section` (mandatory) (string) the section name in the config E.g. `"agent"`
`val` (mandatory) (string) the section name in the config E.g. `"host"`
`data` (mandatory) (as appropriate) the new value for the `val` | 3.465704 | 3.557647 | 0.974156 |
lat = None
lon = None
# note: always picks from first triple
for _, _, o in self._graph.triples((None, GEO_NS.lat, None)):
lat = float(o)
break
for _, _, o in self._graph.triples((None, GEO_NS.long, None)):
lon = float(o)
break
return lat, lon | def get_location(self) | Gets the current geo location of your Thing
Returns tuple of `(lat, lon)` in `float` or `(None, None)` if location is not set for this Thing | 4.00055 | 4.321743 | 0.92568 |
# Remove every geo:lat and geo:long triple (normally one of each).
for triple in self._graph.triples((None, GEO_NS.lat, None)):
    self._graph.remove(triple)
for triple in self._graph.triples((None, GEO_NS.long, None)):
    self._graph.remove(triple)
# Start the send and receive worker threads, blocking until both signal
# readiness. Startup is staged: the send thread must come up first;
# each stage polls its ready-event in 250ms slices so an early thread
# exception (when not ignored) can abort the wait before the timeout.
if not (self.__recv_thread or self.__send_thread):
    self.__end.clear()
    self.__send_ready.clear()
    self.__recv_ready.clear()
    # allow one extra second beyond the socket timeout for connection setup
    timeout = self.__socket_timeout + 1
    ignore_exc = self.__startup_ignore_exc
    self.__send_exc_clear()
    self.__recv_exc_clear()
    # start & await send thread success (unless timeout reached or an exception has occurred)
    self.__send_thread = Thread(target=self.__send_run, name='amqplink_send')
    self.__send_thread.start()
    start_time = monotonic()
    success = False
    while not (success or (not ignore_exc and self.__send_exc) or monotonic() - start_time > timeout):
        success = self.__send_ready.wait(.25)
    if success:
        # start & await receiver thread success
        self.__recv_thread = Thread(target=self.__recv_run, name='amqplink_recv')
        self.__recv_thread.start()
        start_time = monotonic()
        success = False
        while not (success or (not ignore_exc and self.__recv_exc) or monotonic() - start_time >= timeout):
            success = self.__recv_ready.wait(.25)
    # handle either thread's failure: tear down whatever started, then
    # raise the most informative exception available
    if not success:
        logger.warning("AmqpLink Failed to start. Giving up.")
        self.stop()
        if self.__recv_exc:
            # prioritise receive thread since this can get access-denied whereas send does not (until sending)
            raise_from(LinkException('Receive thread failure'), self.__recv_exc)
        elif self.__send_exc:
            raise_from(LinkException('Send thread failure'), self.__send_exc)
        else:
            raise LinkException('Unknown link failure (timeout reached)')
else:
    raise LinkException('amqplink already started')
if self.__send_ready.is_set() and self.__recv_ready.is_set():
if self.__send_thread is not None and self.__recv_thread is not None:
return self.__send_thread.is_alive() and self.__recv_thread.is_alive()
return False | def is_alive(self) | Helper function to show if send & recv Threads are running | 2.490151 | 1.965458 | 1.266957 |
# Signal both worker threads to finish, then join each in turn
# (receive first, matching the reverse of startup order).
self.__end.set()
receiver = self.__recv_thread
if receiver:
    receiver.join()
    self.__recv_thread = None
sender = self.__send_thread
if sender:
    sender.join()
    self.__send_thread = None
# Publish a persistent message on the agent's exchange once the sender
# thread is ready; translate transport-level failures into LinkException
# with the original exception chained as the cause.
if self.__send_ready.wait(timeout):
    try:
        with self.__send_lock:
            # access denied response might be received inside send thread rather than here how to best handle?
            # delivery_mode=2 marks the message persistent
            self.__send_channel.basic_publish(msg=Message(body, delivery_mode=2, content_type=content_type),
                                              exchange=self.__epid)
    except exceptions.AccessRefused as exc:
        raise_from(LinkException('Access denied'), exc)
    except (exceptions.AMQPError, SocketError) as exc:
        raise_from(LinkException('amqp/transport failure'), exc)
    except Exception as exc:  # pylint: disable=broad-except
        raise_from(LinkException('unexpected failure'), exc)
else:
    # Sender never became ready within the timeout; surface the worker
    # thread's stored exception as the cause when one exists.
    exc = self.__send_exc
    if exc:
        raise_from(LinkException('Sender unavailable'), exc)
    else:
        raise LinkException('Sender unavailable (unknown error)')
(in seconds, fractional) or None to block. | 5.333031 | 5.109461 | 1.043756 |
# Build an SSLContext appropriate for the running interpreter:
# TLSv1.2-only with strong ciphers on 2.7.5+/3.4+, SSLv23 minus old
# protocols on 3.2/3.3. When `sslca` is given, verify the broker
# against that CA bundle; otherwise fall back to system certificates.
if ((version_info[0] == 2 and (version_info[1] >= 7 and version_info[2] >= 5)) or
        (version_info[0] == 3 and version_info[1] >= 4)):
    logger.debug('SSL method for 2.7.5+ / 3.4+')
    # pylint: disable=no-name-in-module
    from ssl import SSLContext, PROTOCOL_TLSv1_2, CERT_REQUIRED, OP_NO_COMPRESSION
    ctx = SSLContext(PROTOCOL_TLSv1_2)
    ctx.set_ciphers('HIGH:!SSLv3:!TLSv1:!aNULL:@STRENGTH')
    # disable TLS compression: see CRIME security exploit
    ctx.options |= OP_NO_COMPRESSION
    # the following options are used to verify the identity of the broker
    if sslca:
        ctx.load_verify_locations(sslca)
        ctx.verify_mode = CERT_REQUIRED
        # hostname check skipped: the private CA pins the broker identity
        ctx.check_hostname = False
    else:
        # Verify public certificates if sslca is None (default)
        from ssl import Purpose  # pylint: disable=no-name-in-module
        ctx.load_default_certs(purpose=Purpose.SERVER_AUTH)
        ctx.verify_mode = CERT_REQUIRED
        ctx.check_hostname = True
elif version_info[0] == 3 and version_info[1] < 4:
    logger.debug('Using SSL method for 3.2+, < 3.4')
    # pylint: disable=no-name-in-module
    from ssl import SSLContext, CERT_REQUIRED, PROTOCOL_SSLv23, OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_TLSv1
    ctx = SSLContext(PROTOCOL_SSLv23)
    ctx.options |= (OP_NO_SSLv2 | OP_NO_SSLv3 | OP_NO_TLSv1)
    ctx.set_ciphers('HIGH:!SSLv3:!TLSv1:!aNULL:@STRENGTH')
    # the following options are used to verify the identity of the broker
    if sslca:
        ctx.load_verify_locations(sslca)
        ctx.verify_mode = CERT_REQUIRED
    else:
        # Verify public certificates if sslca is None (default)
        ctx.set_default_verify_paths()
        ctx.verify_mode = CERT_REQUIRED
else:
    raise Exception("Unsupported Python version %s" % '.'.join(str(item) for item in version_info[:3]))
return ctx
try:
self.__msg_callback(msg)
except:
logger.exception("AmqpLink.__recv_cb exception calling msg_callback")
finally:
# only works if all messages handled in series
self.__last_id = msg.delivery_tag
self.__unacked += 1 | def __recv_cb(self, msg) | Calls user-provided callback and marks message for Ack regardless of success | 7.621408 | 7.341163 | 1.038174 |
# Fresh cumulative-ack bookkeeping for this connection attempt.
self.__unacked = 0
self.__last_id = None
try:
    self.__recv_ready.clear() # Ensure event is cleared for EG network failure/retry loop
    # Two channels: channel_data carries raw payloads (no encode/decode),
    # channel_ka carries keep-alive traffic.
    with Connection(userid=self.__prefix + self.__epid,
                    password=self.__passwd,
                    virtual_host=self.__vhost,
                    heartbeat=self.__heartbeat,
                    connect_timeout=self.__socket_timeout,
                    operation_timeout=self.__socket_timeout,
                    ssl=self.__get_ssl_context(self.__sslca),
                    host=self.__host) as conn,\
         conn.channel(auto_encode_decode=False) as channel_data,\
         conn.channel() as channel_ka:
        logger.debug('Connected, using cipher %s', conn.transport.sock.cipher()[0])
        channel_data.basic_qos(prefetch_size=0, prefetch_count=self.__prefetch, a_global=False)
        # exclusive=True. There can be only one (receiver)
        msgtag = channel_data.basic_consume(queue=self.__epid, exclusive=True, callback=self.__recv_cb)
        acktag = channel_ka.basic_consume(queue=('%s_ka' % self.__epid), exclusive=True, no_ack=True,
                                          callback=self.__recv_ka_cb)
        self.__ka_channel = channel_ka
        self.__recv_exc_clear(log_if_exc_set='reconnected')
        self.__recv_ready.set()
        try:
            #
            # Drain loop
            while not self.__end.is_set():
                try:
                    while not self.__end.is_set() and self.__unacked < self.__ack_threshold:
                        # inner loop to handle all outstanding amqp messages
                        conn.drain_events(.1)
                except SocketTimeout:
                    # no message within the 0.1s window - fall through to ack
                    pass
                # either have waited for .1s or threshold reached, so always ack
                if self.__unacked:
                    logger.debug('acking (%d) up to %s', self.__unacked, self.__last_id)
                    # cumulative ack up to the last tag recorded by __recv_cb
                    channel_data.basic_ack(self.__last_id, multiple=True)
                    self.__unacked = 0
                conn.heartbeat_tick()
        finally:
            self.__recv_ready.clear()
            # best-effort consumer cancellation; connection may already be gone
            try:
                channel_data.basic_cancel(msgtag)
                channel_ka.basic_cancel(acktag)
            except:
                pass
# Each known failure mode records the exception and sleeps before the
# enclosing retry loop (see signature metadata) reconnects.
except exceptions.AccessRefused:
    self.__recv_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
except exceptions.ConnectionForced:
    self.__recv_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
except SocketTimeout:
    self.__recv_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
except SSLError:
    self.__recv_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
except (exceptions.AMQPError, SocketError):
    self.__recv_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
except:
    # unknown failure: log without delay and leave the receive loop for good
    self.__recv_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
    break
logger.debug('finished') | def __recv_run(self): # pylint: disable=too-many-branches,too-many-statements
while not self.__end.is_set() | Main receive thread/loop | 4.713915 | 4.632501 | 1.017575 |
# Repeated failures (an exception already recorded) are demoted to DEBUG so
# the retry loop does not flood the log; an explicit level always wins.
logger.log(
    ((logging.DEBUG if self.__recv_exc else logging.ERROR) if level is None else level),
    msg,
    exc_info=DEBUG_ENABLED
)
# Must run inside an except block: exc_info()[1] is the active exception.
# Then pause (interruptibly, via the shutdown event) before retrying.
self.__recv_exc = exc_info()[1]
self.__end.wait(wait_seconds) | def __recv_log_set_exc_and_wait(self, msg, level=None, wait_seconds=CONN_RETRY_DELAY_SECONDS) | Equivalent to __send_log_set_exc_and_wait but for receiver thread | 5.963048 | 5.366626 | 1.111135 |
# If recovering from a previously recorded failure, optionally log an INFO
# note (e.g. 'reconnected') before clearing the stored exception.
if not (log_if_exc_set is None or self.__recv_exc is None):
    logger.info(log_if_exc_set)
self.__recv_exc = None | def __recv_exc_clear(self, log_if_exc_set=None) | Equivalent to __send_exc_clear | 3.363524 | 3.244237 | 1.036769 |
# Outer retry loop: reconnect after any recoverable failure until stop().
while not self.__end.is_set():
    try:
        with Connection(userid=self.__prefix + self.__epid,
                        password=self.__passwd,
                        virtual_host=self.__vhost,
                        heartbeat=self.__heartbeat,
                        connect_timeout=self.__socket_timeout,
                        operation_timeout=self.__socket_timeout,
                        ssl=self.__get_ssl_context(self.__sslca),
                        host=self.__host) as conn,\
             conn.channel(auto_encode_decode=False) as channel:
            # Publish channel shared with send(); guarded by __send_lock.
            self.__send_channel = channel
            self.__send_exc_clear(log_if_exc_set='reconnected')
            self.__send_ready.set()
            try:
                self.__send_ready_callback(self.__send_exc_time)
                while not self.__end.is_set():
                    with self.__send_lock:
                        try:
                            # deal with any incoming messages (AMQP protocol only, not QAPI)
                            conn.drain_events(0)
                        except (BlockingIOError, SocketTimeout):
                            pass
                        conn.heartbeat_tick()
                    # idle
                    self.__end.wait(.25)
            finally:
                # locked so can make sure another call to send() is not made whilst shutting down
                with self.__send_lock:
                    self.__send_ready.clear()
    # Each known failure records the exception and sleeps before retrying.
    except exceptions.AccessRefused:
        self.__send_log_set_exc_and_wait('Access Refused (Credentials already in use?)')
    except exceptions.ConnectionForced:
        self.__send_log_set_exc_and_wait('Disconnected by broker (ConnectionForced)')
    except SocketTimeout:
        self.__send_log_set_exc_and_wait('SocketTimeout exception. wrong credentials, vhost or prefix?')
    except SSLError:
        self.__send_log_set_exc_and_wait('ssl.SSLError Bad Certificate?')
    except (exceptions.AMQPError, SocketError):
        self.__send_log_set_exc_and_wait('amqp/transport failure, sleeping before retry')
    except:
        # unknown failure: log without delay and leave the send loop for good
        self.__send_log_set_exc_and_wait('unexpected failure, exiting', wait_seconds=0)
        break
logger.debug('finished') | def __send_run(self) | Send request thread | 5.223598 | 5.195362 | 1.005435 |
# Repeated failures (exception already recorded) are demoted to DEBUG to
# avoid flooding the log during retries; an explicit level always wins.
logger.log(
    ((logging.DEBUG if self.__send_exc else logging.ERROR) if level is None else level),
    msg,
    exc_info=DEBUG_ENABLED
)
# Record when and what failed (must run inside an except block), then pause
# interruptibly via the shutdown event before the caller retries.
self.__send_exc_time = monotonic()
self.__send_exc = exc_info()[1]
self.__end.wait(wait_seconds) | def __send_log_set_exc_and_wait(self, msg, level=None, wait_seconds=CONN_RETRY_DELAY_SECONDS) | To be called in exception context only.
msg - message to log
level - logging level. If not specified, ERROR unless it is a repeated failure in which case DEBUG. If
specified, the given level will always be used.
wait_seconds - how long to pause for (so retry is not triggered immediately) | 5.431031 | 6.039233 | 0.899292 |
# If recovering from a previously recorded failure, optionally log an INFO
# note before clearing both the stored exception and its timestamp.
if not (log_if_exc_set is None or self.__send_exc is None):
    logger.info(log_if_exc_set)
self.__send_exc_time = None
self.__send_exc = None | def __send_exc_clear(self, log_if_exc_set=None) | Clear send exception and time. If exception was previously was set, optionally log log_if_exc_set at INFO
level. | 3.256645 | 2.56123 | 1.271516 |
if isinstance(type_, unicode_type):
    match = __IDX_PATTERN.match(type_)
    if match:
        # "idx/NUMBER" form: valid only when NUMBER is a known shorthand.
        return match.group(1) in __IDX_MAPPING
    else:
        # Plain type string: ASCII-only, between 1 and __MAX_LEN characters.
        return __is_ascii(type_, 1, __MAX_LEN)
else:
    # Non-string input: only None is acceptable, and only when allowed.
    return type_ is None and allow_none | def valid_mimetype(type_, allow_none=True) | Checks for validity of given type, optionally allowing for a None value. Note: Unknown idx/NUMBER notation, where
NUMBER is not a known shorthand mapping, will be rejected, i.e. type_ is valid if it
1) is an ASCII-only string between 1 & 64 characters long
2a) does not begin with "idx/" OR
2b) begins with "idx/" and is followed by a known shorthand index (integer) | 5.334244 | 4.576439 | 1.165588 |
if isinstance(type_, unicode_type):
    match = __IDX_PATTERN.match(type_)
    # Expand a known "idx/N" shorthand to its long form; unknown shorthands
    # and plain types pass through unchanged.
    return __IDX_MAPPING.get(match.group(1), type_) if match else type_
else:
    # Non-string (e.g. None) is returned as-is rather than raising.
    return type_ | def expand_idx_mimetype(type_) | Returns long equivalent of type_, if available, otherwise type_ itself. Does not raise exceptions | 4.005756 | 3.612805 | 1.108766 |
# Sliding-window rate limiter: __iterations is a deque of timestamps of
# recent calls within the last __interval seconds.
iterations = self.__iterations
timestamp = monotonic()
outdated_threshold = timestamp - self.__interval
with self.__lock:
    # remove any iterations older than interval
    try:
        while iterations[0] < outdated_threshold:
            iterations.popleft()
    except IndexError:
        # deque emptied - nothing left inside the window
        pass
    # apply throttling if rate would be exceeded
    if len(iterations) <= self.__max_iterations:
        iterations.append(timestamp)
        retval = None
    else:
        # wait until oldest sample is too old
        delay = max(0, iterations[0] + self.__interval - timestamp)
        # only notify user about longer delays
        if delay > 1:
            logger.warning('Send throttling delay (interval=%d, max_iterations=%d): %.2fs', self.__interval,
                           self.__max_iterations, delay)
        # __wait_cmd is the configured wait (e.g. sleep); its return value is
        # surfaced to the caller when a wait was required.
        retval = self.__wait_cmd(delay)
        # log actual addition time
        iterations.append(monotonic())
    return retval | def throttle(self) | Uses time.monotonic() (or time.sleep() if not available) to limit to the desired rate. Should be called once
per iteration of action which is to be throttled. Returns None unless a custom wait_cmd was specified in the
constructor in which case its return value is used if a wait was required. | 5.67667 | 5.078475 | 1.11779 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.