| query (stringlengths 9-3.4k) | document (stringlengths 9-87.4k) | metadata (dict) | negatives (listlengths 4-101) | negative_scores (listlengths 4-101) | document_score (stringlengths 3-10) | document_rank (stringclasses, 102 values) |
|---|---|---|---|---|---|---|
This operation can be used to release the internal endpoint of a shard or Configserver node in a sharded cluster instance. For more information, see [Release the endpoint of a shard or Configserver node](~~134067~~). To release the public endpoint of a shard or Configserver node in a sharded cluster instance, you can call the [ReleasePublicNetworkAddress](~~67604~~) operation.
|
def release_node_private_network_address_with_options(
    self,
    request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.network_type):
        query['NetworkType'] = request.network_type
    if not UtilClient.is_unset(request.node_id):
        query['NodeId'] = request.node_id
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='ReleaseNodePrivateNetworkAddress',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),
        self.call_api(params, req, runtime)
    )
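
The generated `*_with_options` method above is typically invoked through the SDK client rather than called directly. The following is a minimal usage sketch; the import paths, the `Dds20151201Client` class name, the endpoint value, and the placeholder instance/node IDs are assumptions based on the usual Alibaba Cloud Tea-generated Python SDK layout, not something shown in this row.

```python
# Minimal usage sketch (assumed SDK layout; not taken from this dataset row).
from alibabacloud_dds20151201.client import Client as Dds20151201Client  # assumed import path
from alibabacloud_dds20151201 import models as dds_20151201_models        # assumed import path
from alibabacloud_tea_openapi import models as open_api_models            # assumed import path
from alibabacloud_tea_util import models as util_models                   # assumed import path

# Credentials, endpoint, and IDs below are placeholders.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='mongodb.aliyuncs.com',
)
client = Dds20151201Client(config)

# The keyword names mirror the request attributes the method above reads
# (dbinstance_id, node_id, network_type).
request = dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest(
    dbinstance_id='dds-bp1xxxxxxxx',
    node_id='d-bp1xxxxxxxx',
    network_type='VPC',
)
runtime = util_models.RuntimeOptions()
response = client.release_node_private_network_address_with_options(request, runtime)
print(response.body)
```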
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
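
The metadata above flags this row for a `(query, document, negatives)` triplet objective. As an illustrative sketch of how a row with these columns could feed a contrastive loss (the `encode` function, the use of PyTorch, and the temperature value are assumptions, not part of the dataset):

```python
# Illustrative sketch only: mapping one row onto a triplet-style contrastive loss.
# encode() is a placeholder for whatever embedding model this data trains.
import torch
import torch.nn.functional as F

def triplet_infonce_loss(encode, row, temperature=0.05):
    """Score the positive document against the row's negatives for one query."""
    q = encode(row["query"])                                       # (d,)
    d_pos = encode(row["document"])                                 # (d,)
    d_negs = torch.stack([encode(n) for n in row["negatives"]])     # (k, d)

    candidates = torch.cat([d_pos.unsqueeze(0), d_negs], dim=0)     # (1 + k, d)
    sims = F.cosine_similarity(q.unsqueeze(0), candidates) / temperature
    target = torch.zeros(1, dtype=torch.long)                       # positive is candidate 0
    return F.cross_entropy(sims.unsqueeze(0), target)
```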
|
[
"def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')",
"def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)",
"def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False",
"def disassociate_elastic_ip(ElasticIp=None):\n pass",
"async def release_node_private_network_address_with_options_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def delete_endpoint(EndpointName=None):\n pass",
"def test_delete_host_subnet(self):\n pass",
"def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response",
"def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):",
"def deregister_elastic_ip(ElasticIp=None):\n pass",
"def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)",
"def delete_endpoint_config(EndpointConfigName=None):\n pass",
"def test_replace_host_subnet(self):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n LOG.debug(\"release_dhcp_port: %s %s\", network_id, device_id)",
"def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )",
"def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()",
"def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)",
"def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)",
"def remove_gateway(self, network_ref):\n raise NotImplementedError()",
"def post_instance_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)",
"def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))",
"def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})",
"def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass",
"def test_patch_host_subnet(self):\n pass",
"def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)",
"def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})",
"def unregister_router(self, hostname):",
"def test_delete_collection_host_subnet(self):\n pass",
"def update_endpoint(EndpointName=None, EndpointConfigName=None):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))",
"def delete(ctx, iface, resource_config, **_):\n vpn_connection = ctx.instance.runtime_properties.get('VPN_CONNECTION_ID')\n cider_block = ctx.instance.runtime_properties.get('DESTINATION_CIDR_BLOCK')\n\n params = dict(VpnConnectionId=vpn_connection,\n DestinationCidrBlock=cider_block) \\\n if not resource_config else resource_config.copy()\n iface.delete(params)",
"def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])",
"def delete_internal_interface(self, oid, subnet):\n data = {\"subnet_id\": subnet}\n path = '%s/routers/%s/remove_router_interface' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data),\n token=self.manager.identity.token)\n self.logger.debug('Delete an internal interface from openstack router %s: %s' % \n (oid, truncate(res)))\n return res[0]",
"def cluster_release(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.release_cluster(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster release failed\")",
"def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass",
"def post_subnet_delete(self, resource_id, resource_dict):\n pass",
"async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)",
"def delete_network_object(session, key):\n # type: (Session, Text) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n return _delete(session, url_tail, {CoordConstsV2.QP_KEY: key})",
"def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)",
"def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')",
"def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_delete_cluster_network(self):\n pass",
"def delete_handler(event, context):\n delete_endpoint_config(event)",
"def post_network_ipam_delete(self, resource_id, resource_dict):\n pass",
"def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return",
"def post_virtual_network_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def free_host(ptr):\n cptr = ct.c_void_p(ptr)\n safe_call(backend.get().af_free_host(cptr))",
"def test_delete_namespaced_egress_network_policy(self):\n pass",
"def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def vpp_lb_add_del_vip(node, **kwargs):\n if node[u\"type\"] == NodeType.DUT:\n vip_addr = kwargs.pop(u\"vip_addr\", \"0.0.0.0\")\n protocol = kwargs.pop(u\"protocol\", 255)\n port = kwargs.pop(u\"port\", 0)\n encap = kwargs.pop(u\"encap\", 0)\n dscp = kwargs.pop(u\"dscp\", 0)\n srv_type = kwargs.pop(u\"srv_type\", 0)\n target_port = kwargs.pop(u\"target_port\", 0)\n node_port = kwargs.pop(u\"node_port\", 0)\n new_len = kwargs.pop(u\"new_len\", 1024)\n src_ip_sticky = kwargs.pop(u\"src_ip_sticky\", 0)\n is_del = kwargs.pop(u\"is_del\", 0)\n\n cmd = u\"lb_add_del_vip_v2\"\n err_msg = f\"Failed to add vip on host {node[u'host']}\"\n\n vip_addr = ip_address(vip_addr).packed\n args = dict(\n pfx={\n u\"len\": 128,\n u\"address\": {u\"un\": {u\"ip4\": vip_addr}, u\"af\": 0}\n },\n protocol=protocol,\n port=port,\n encap=htonl(encap),\n dscp=dscp,\n type=srv_type,\n target_port=target_port,\n node_port=node_port,\n new_flows_table_length=int(new_len),\n src_ip_sticky=src_ip_sticky,\n is_del=is_del,\n )\n\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)\n else:\n raise ValueError(\n f\"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'\"\n )",
"def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)",
"def test_esg_gateway_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|dgw_ip', esg_gateway.delete,\n {'gateway': {}},\n # read\n read_args=['routingConfigStatic'],\n read_kwargs={'uri_parameters': {'edgeId': \"esg_id\"}},\n read_response={\n 'status': 204,\n 'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE\n },\n # update\n update_args=['routingConfigStatic'],\n update_kwargs={\n 'uri_parameters': {'edgeId': \"esg_id\"},\n 'request_body_dict': {\n 'staticRouting': {\n 'staticRoutes': {},\n 'defaultRoute': None\n }\n }\n }\n )",
"def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")",
"def test_destroy_nas_share_by_pool(self):\n pass",
"def post_floating_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)",
"def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)",
"def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass",
"def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)",
"def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)",
"def unlink(address):",
"def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)",
"def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)",
"def test_unlink_relink():\n\n topology = \"\"\"\\\n [type=host] hs1\n [type=host] hs2\n [identifier=thelink] hs1:a -- hs2:b\n \"\"\"\n\n mgr = TopologyManager(engine='docker')\n mgr.parse(topology)\n mgr.build()\n\n hs1 = mgr.get('hs1')\n hs2 = mgr.get('hs2')\n\n try:\n\n assert hs1 is not None\n assert hs2 is not None\n\n # Configure IPs\n hs1('ip link set dev a up')\n hs1('ip addr add 10.0.15.1/24 dev a')\n hs2('ip link set dev b up')\n hs2('ip addr add 10.0.15.2/24 dev b')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n # Unlink\n mgr.unlink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert 'Network is unreachable' in ping_result\n\n # Relink\n mgr.relink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n finally:\n mgr.unbuild()",
"def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n 
ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already exists'.format(name)\n return ret",
"def delete_network_profile(arn=None):\n pass",
"def connection(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"connection\", public_id)",
"def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data",
"def test_create_host_subnet(self):\n pass",
"def paths_revoke_network_block(ctx, network, destination, source, port):\n source_block = cloudless.paths.CidrBlock(source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.remove(source_block, destination_service, port)\n click.echo('Removed path from %s to %s in network %s for port %s' % (source, destination,\n network, port))",
"def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n 
openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)",
"def test_patch_namespaced_egress_network_policy(self):\n pass",
"def delete_network_segments(self, tenant_id, network_segments):",
"def _delete_local_endpoint(self, resource, event, trigger, **kwargs):\n router_id = kwargs.get('router_id')\n # delete the local endpoint from the NSX\n local_ep_id = self._search_local_endpint(router_id)\n if local_ep_id:\n self._nsx_vpn.local_endpoint.delete(local_ep_id)\n # delete the neutron port with this IP\n ctx = n_context.get_admin_context()\n port = self._find_vpn_service_port(ctx, router_id)\n if port:\n self.l3_plugin.delete_port(ctx, port['id'], force_delete_vpn=True)",
"def update_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.delete_endpoint(endpoint)\n self.add_endpoint(endpoint)",
"def put_network_object(session, key, data):\n # type: (Session, Text, Any) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n _put_stream(session, url_tail, data, {CoordConstsV2.QP_KEY: key})",
"def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)",
"def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)",
"def test_host_routes_create_two_subnets_then_delete_one(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n self.assertEqual([{'destination': cidrs[0],\n 'nexthop': gateway_ips[1]}],\n sub_res['subnet']['host_routes'])\n\n del_req = self.new_delete_request('subnets', subnet0['id'])\n del_req.get_response(self.api)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n\n self.assertEqual([], sub_res['subnet']['host_routes'])",
"def contract(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"contract\", public_id)",
"def post_physical_router_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def delete_public_ip(self, ip=None):\n raise NotImplementedError",
"def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def test_bgp_neighbour_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|ip|remoteAS|protocolIp|forwardingIp',\n dlr_bgp_neighbour.delete,\n {},\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )",
"def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret",
"def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})",
"def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)"
] |
[
"0.5712464",
"0.5607081",
"0.5554131",
"0.524051",
"0.52084637",
"0.5085088",
"0.50104165",
"0.49779233",
"0.49513933",
"0.49257186",
"0.4916261",
"0.48996267",
"0.48951858",
"0.48708498",
"0.48667976",
"0.48482025",
"0.47911367",
"0.47712603",
"0.47670642",
"0.4749826",
"0.47098243",
"0.46974462",
"0.46822196",
"0.4679282",
"0.46725243",
"0.46538797",
"0.46525046",
"0.4645978",
"0.4635546",
"0.46105352",
"0.46040067",
"0.45984492",
"0.45847303",
"0.45666355",
"0.45643458",
"0.45587006",
"0.45458528",
"0.45452514",
"0.4544383",
"0.45364323",
"0.45319712",
"0.4529994",
"0.45297205",
"0.45243186",
"0.45220402",
"0.4479313",
"0.44636658",
"0.44594795",
"0.4450905",
"0.44440255",
"0.44419134",
"0.44377112",
"0.4426881",
"0.442375",
"0.44170958",
"0.43979308",
"0.43937796",
"0.43844596",
"0.43825072",
"0.43732703",
"0.43649384",
"0.4364843",
"0.4363933",
"0.4361383",
"0.43582892",
"0.43528327",
"0.4342931",
"0.43378478",
"0.43333307",
"0.43324977",
"0.43301854",
"0.43289417",
"0.43268695",
"0.4318821",
"0.43137452",
"0.43133882",
"0.42992085",
"0.42959705",
"0.42947572",
"0.4292423",
"0.42910978",
"0.42840382",
"0.4276379",
"0.4276368",
"0.42695314",
"0.42684135",
"0.42664507",
"0.4265597",
"0.42623448",
"0.4259488",
"0.42533547",
"0.425233",
"0.4251604",
"0.42503193",
"0.42441076",
"0.42440018",
"0.42417163",
"0.42322126",
"0.423011",
"0.4224818"
] |
0.54412097
|
3
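
The header does not document how `document_rank` relates to the score columns. One interpretation consistent with this row (document_score 0.54412097, rank 3, and exactly three negatives scoring higher) is that the rank counts the negatives the scoring model placed above the positive document. A small check under that assumption, using the top of this row's descending-sorted `negative_scores` list:

```python
# Assumption: document_rank = number of negatives whose score exceeds document_score.
# The negative_scores list above is sorted in descending order, so checking the
# leading entries is enough for this row.
def infer_rank(document_score, negative_scores):
    doc = float(document_score)
    return sum(float(s) > doc for s in negative_scores)

assert infer_rank("0.54412097", ["0.5712464", "0.5607081", "0.5554131", "0.524051"]) == 3
```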
|
This operation can be used to release the internal endpoint of a shard or Configserver node in a sharded cluster instance. For more information, see [Release the endpoint of a shard or Configserver node](~~134067~~). To release the public endpoint of a shard or Configserver node in a sharded cluster instance, you can call the [ReleasePublicNetworkAddress](~~67604~~) operation.
|
async def release_node_private_network_address_with_options_async(
    self,
    request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,
    runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:
    UtilClient.validate_model(request)
    query = {}
    if not UtilClient.is_unset(request.dbinstance_id):
        query['DBInstanceId'] = request.dbinstance_id
    if not UtilClient.is_unset(request.network_type):
        query['NetworkType'] = request.network_type
    if not UtilClient.is_unset(request.node_id):
        query['NodeId'] = request.node_id
    if not UtilClient.is_unset(request.owner_account):
        query['OwnerAccount'] = request.owner_account
    if not UtilClient.is_unset(request.owner_id):
        query['OwnerId'] = request.owner_id
    if not UtilClient.is_unset(request.resource_owner_account):
        query['ResourceOwnerAccount'] = request.resource_owner_account
    if not UtilClient.is_unset(request.resource_owner_id):
        query['ResourceOwnerId'] = request.resource_owner_id
    if not UtilClient.is_unset(request.security_token):
        query['SecurityToken'] = request.security_token
    req = open_api_models.OpenApiRequest(
        query=OpenApiUtilClient.query(query)
    )
    params = open_api_models.Params(
        action='ReleaseNodePrivateNetworkAddress',
        version='2015-12-01',
        protocol='HTTPS',
        pathname='/',
        method='POST',
        auth_type='AK',
        style='RPC',
        req_body_type='formData',
        body_type='json'
    )
    return TeaCore.from_map(
        dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),
        await self.call_api_async(params, req, runtime)
    )
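
The async variant above is normally driven from an event loop, typically through the `release_node_private_network_address_async` wrapper visible among this row's negatives. A minimal sketch, assuming a `client`, `dds_20151201_models`, and `util_models` set up exactly as in the synchronous usage example earlier (all of which are assumptions about the SDK layout, not part of this row):

```python
# Sketch only: running the async variant with asyncio.
# `client`, `dds_20151201_models`, and `util_models` are assumed to be
# imported/constructed as in the synchronous sketch above.
import asyncio

async def main() -> None:
    request = dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest(
        dbinstance_id='dds-bp1xxxxxxxx',  # placeholder instance ID
        node_id='d-bp1xxxxxxxx',          # placeholder node ID
        network_type='VPC',
    )
    runtime = util_models.RuntimeOptions()
    response = await client.release_node_private_network_address_with_options_async(
        request, runtime
    )
    print(response.body)

asyncio.run(main())
```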
|
{
    "objective": {
        "self": [],
        "paired": [],
        "triplet": [
            [
                "query",
                "document",
                "negatives"
            ]
        ]
    }
}
|
[
"def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')",
"def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)",
"def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False",
"def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )",
"def disassociate_elastic_ip(ElasticIp=None):\n pass",
"def delete_endpoint(EndpointName=None):\n pass",
"def test_delete_host_subnet(self):\n pass",
"def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response",
"def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):",
"def deregister_elastic_ip(ElasticIp=None):\n pass",
"def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)",
"def delete_endpoint_config(EndpointConfigName=None):\n pass",
"def test_replace_host_subnet(self):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n LOG.debug(\"release_dhcp_port: %s %s\", network_id, device_id)",
"def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )",
"def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()",
"def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)",
"def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)",
"def remove_gateway(self, network_ref):\n raise NotImplementedError()",
"def post_instance_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)",
"def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))",
"def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})",
"def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass",
"def test_patch_host_subnet(self):\n pass",
"def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)",
"def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})",
"def unregister_router(self, hostname):",
"def test_delete_collection_host_subnet(self):\n pass",
"def update_endpoint(EndpointName=None, EndpointConfigName=None):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))",
"def delete(ctx, iface, resource_config, **_):\n vpn_connection = ctx.instance.runtime_properties.get('VPN_CONNECTION_ID')\n cider_block = ctx.instance.runtime_properties.get('DESTINATION_CIDR_BLOCK')\n\n params = dict(VpnConnectionId=vpn_connection,\n DestinationCidrBlock=cider_block) \\\n if not resource_config else resource_config.copy()\n iface.delete(params)",
"def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])",
"def delete_internal_interface(self, oid, subnet):\n data = {\"subnet_id\": subnet}\n path = '%s/routers/%s/remove_router_interface' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data),\n token=self.manager.identity.token)\n self.logger.debug('Delete an internal interface from openstack router %s: %s' % \n (oid, truncate(res)))\n return res[0]",
"def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass",
"def cluster_release(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.release_cluster(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster release failed\")",
"def post_subnet_delete(self, resource_id, resource_dict):\n pass",
"async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)",
"def delete_network_object(session, key):\n # type: (Session, Text) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n return _delete(session, url_tail, {CoordConstsV2.QP_KEY: key})",
"def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)",
"def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')",
"def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_delete_cluster_network(self):\n pass",
"def delete_handler(event, context):\n delete_endpoint_config(event)",
"def post_network_ipam_delete(self, resource_id, resource_dict):\n pass",
"def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return",
"def post_virtual_network_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def free_host(ptr):\n cptr = ct.c_void_p(ptr)\n safe_call(backend.get().af_free_host(cptr))",
"def test_delete_namespaced_egress_network_policy(self):\n pass",
"def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def vpp_lb_add_del_vip(node, **kwargs):\n if node[u\"type\"] == NodeType.DUT:\n vip_addr = kwargs.pop(u\"vip_addr\", \"0.0.0.0\")\n protocol = kwargs.pop(u\"protocol\", 255)\n port = kwargs.pop(u\"port\", 0)\n encap = kwargs.pop(u\"encap\", 0)\n dscp = kwargs.pop(u\"dscp\", 0)\n srv_type = kwargs.pop(u\"srv_type\", 0)\n target_port = kwargs.pop(u\"target_port\", 0)\n node_port = kwargs.pop(u\"node_port\", 0)\n new_len = kwargs.pop(u\"new_len\", 1024)\n src_ip_sticky = kwargs.pop(u\"src_ip_sticky\", 0)\n is_del = kwargs.pop(u\"is_del\", 0)\n\n cmd = u\"lb_add_del_vip_v2\"\n err_msg = f\"Failed to add vip on host {node[u'host']}\"\n\n vip_addr = ip_address(vip_addr).packed\n args = dict(\n pfx={\n u\"len\": 128,\n u\"address\": {u\"un\": {u\"ip4\": vip_addr}, u\"af\": 0}\n },\n protocol=protocol,\n port=port,\n encap=htonl(encap),\n dscp=dscp,\n type=srv_type,\n target_port=target_port,\n node_port=node_port,\n new_flows_table_length=int(new_len),\n src_ip_sticky=src_ip_sticky,\n is_del=is_del,\n )\n\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)\n else:\n raise ValueError(\n f\"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'\"\n )",
"def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)",
"def test_esg_gateway_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|dgw_ip', esg_gateway.delete,\n {'gateway': {}},\n # read\n read_args=['routingConfigStatic'],\n read_kwargs={'uri_parameters': {'edgeId': \"esg_id\"}},\n read_response={\n 'status': 204,\n 'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE\n },\n # update\n update_args=['routingConfigStatic'],\n update_kwargs={\n 'uri_parameters': {'edgeId': \"esg_id\"},\n 'request_body_dict': {\n 'staticRouting': {\n 'staticRoutes': {},\n 'defaultRoute': None\n }\n }\n }\n )",
"def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")",
"def test_destroy_nas_share_by_pool(self):\n pass",
"def post_floating_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)",
"def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)",
"def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass",
"def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)",
"def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)",
"def unlink(address):",
"def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)",
"def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)",
"def test_unlink_relink():\n\n topology = \"\"\"\\\n [type=host] hs1\n [type=host] hs2\n [identifier=thelink] hs1:a -- hs2:b\n \"\"\"\n\n mgr = TopologyManager(engine='docker')\n mgr.parse(topology)\n mgr.build()\n\n hs1 = mgr.get('hs1')\n hs2 = mgr.get('hs2')\n\n try:\n\n assert hs1 is not None\n assert hs2 is not None\n\n # Configure IPs\n hs1('ip link set dev a up')\n hs1('ip addr add 10.0.15.1/24 dev a')\n hs2('ip link set dev b up')\n hs2('ip addr add 10.0.15.2/24 dev b')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n # Unlink\n mgr.unlink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert 'Network is unreachable' in ping_result\n\n # Relink\n mgr.relink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n finally:\n mgr.unbuild()",
"def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n 
ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already exists'.format(name)\n return ret",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def delete_network_profile(arn=None):\n pass",
"def connection(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"connection\", public_id)",
"def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data",
"def paths_revoke_network_block(ctx, network, destination, source, port):\n source_block = cloudless.paths.CidrBlock(source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.remove(source_block, destination_service, port)\n click.echo('Removed path from %s to %s in network %s for port %s' % (source, destination,\n network, port))",
"def test_create_host_subnet(self):\n pass",
"def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n 
openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)",
"def delete_network_segments(self, tenant_id, network_segments):",
"def test_patch_namespaced_egress_network_policy(self):\n pass",
"def _delete_local_endpoint(self, resource, event, trigger, **kwargs):\n router_id = kwargs.get('router_id')\n # delete the local endpoint from the NSX\n local_ep_id = self._search_local_endpint(router_id)\n if local_ep_id:\n self._nsx_vpn.local_endpoint.delete(local_ep_id)\n # delete the neutron port with this IP\n ctx = n_context.get_admin_context()\n port = self._find_vpn_service_port(ctx, router_id)\n if port:\n self.l3_plugin.delete_port(ctx, port['id'], force_delete_vpn=True)",
"def update_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.delete_endpoint(endpoint)\n self.add_endpoint(endpoint)",
"def put_network_object(session, key, data):\n # type: (Session, Text, Any) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n _put_stream(session, url_tail, data, {CoordConstsV2.QP_KEY: key})",
"def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)",
"def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)",
"def test_host_routes_create_two_subnets_then_delete_one(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n self.assertEqual([{'destination': cidrs[0],\n 'nexthop': gateway_ips[1]}],\n sub_res['subnet']['host_routes'])\n\n del_req = self.new_delete_request('subnets', subnet0['id'])\n del_req.get_response(self.api)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n\n self.assertEqual([], sub_res['subnet']['host_routes'])",
"def contract(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"contract\", public_id)",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def post_physical_router_delete(self, resource_id, resource_dict):\n pass",
"def delete_public_ip(self, ip=None):\n raise NotImplementedError",
"def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def test_bgp_neighbour_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|ip|remoteAS|protocolIp|forwardingIp',\n dlr_bgp_neighbour.delete,\n {},\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )",
"def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret",
"def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})",
"def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)"
] |
[
"0.57126313",
"0.5606727",
"0.55545366",
"0.5440909",
"0.5240857",
"0.5087529",
"0.5010123",
"0.49782026",
"0.49522084",
"0.49264276",
"0.4914645",
"0.48995593",
"0.48968387",
"0.48700112",
"0.48656347",
"0.48495722",
"0.47913688",
"0.47733217",
"0.47699565",
"0.47490388",
"0.4710802",
"0.46971422",
"0.46835113",
"0.46804243",
"0.4672167",
"0.46541747",
"0.46514514",
"0.46464825",
"0.46361813",
"0.46111998",
"0.4603677",
"0.46004033",
"0.4583557",
"0.4566374",
"0.4564702",
"0.45589155",
"0.454613",
"0.45447463",
"0.45441866",
"0.45364803",
"0.4533749",
"0.45295912",
"0.45291963",
"0.45257398",
"0.45221975",
"0.4480759",
"0.4463036",
"0.44610947",
"0.44500196",
"0.44439012",
"0.4441364",
"0.4438963",
"0.4427706",
"0.44238478",
"0.44178247",
"0.43994355",
"0.43947002",
"0.43854585",
"0.4383274",
"0.43740088",
"0.43652216",
"0.4364917",
"0.4363548",
"0.4361464",
"0.43581405",
"0.43529564",
"0.43424913",
"0.43381116",
"0.4334107",
"0.4331395",
"0.43308392",
"0.4329054",
"0.4327135",
"0.4319799",
"0.43159398",
"0.43142366",
"0.4299894",
"0.4297153",
"0.4294757",
"0.42935988",
"0.42921892",
"0.428404",
"0.42769963",
"0.427648",
"0.42710745",
"0.42710334",
"0.42662442",
"0.4265663",
"0.42617637",
"0.42595515",
"0.42551056",
"0.4252821",
"0.42525008",
"0.4250867",
"0.4243994",
"0.4243589",
"0.42407",
"0.4232862",
"0.42311868",
"0.42249367"
] |
0.5208502
|
5
|
You can call this operation to release the internal endpoint of a shard or Configserver node in a sharded cluster instance. For more information, see [Release the endpoint of a shard or Configserver node](~~134067~~). To release the public endpoint of a shard or Configserver node, call the [ReleasePublicNetworkAddress](~~67604~~) operation.
|
def release_node_private_network_address(
self,
request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,
) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:
runtime = util_models.RuntimeOptions()
return self.release_node_private_network_address_with_options(request, runtime)
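
A minimal usage sketch for the method above, assuming an already initialized DDS SDK client object named client; the instance ID, node ID, and network type values are hypothetical placeholders, not values taken from the source.

# Hedged usage sketch: client is assumed to be a constructed SDK client that
# exposes release_node_private_network_address(); all IDs are placeholders.
request = dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # sharded cluster instance ID (placeholder)
    node_id='d-bp1xxxxxxxxxxxxx',          # shard or Configserver node ID (placeholder)
    network_type='VPC',                    # network type of the internal endpoint to release (assumed value)
)
response = client.release_node_private_network_address(request)
print(response.body)

The call above forwards the request to release_node_private_network_address_with_options with default RuntimeOptions, so per-call settings such as timeouts or retries can be supplied by invoking the *_with_options variant directly.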
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')",
"def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False",
"def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )",
"def disassociate_elastic_ip(ElasticIp=None):\n pass",
"async def release_node_private_network_address_with_options_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def delete_endpoint(EndpointName=None):\n pass",
"def test_delete_host_subnet(self):\n pass",
"def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response",
"def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):",
"def deregister_elastic_ip(ElasticIp=None):\n pass",
"def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"async def release_node_private_network_address_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return await self.release_node_private_network_address_with_options_async(request, runtime)",
"def delete_endpoint_config(EndpointConfigName=None):\n pass",
"def test_replace_host_subnet(self):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n LOG.debug(\"release_dhcp_port: %s %s\", network_id, device_id)",
"def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )",
"def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()",
"def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)",
"def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)",
"def remove_gateway(self, network_ref):\n raise NotImplementedError()",
"def post_instance_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)",
"def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))",
"def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})",
"def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass",
"def test_patch_host_subnet(self):\n pass",
"def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)",
"def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})",
"def unregister_router(self, hostname):",
"def test_delete_collection_host_subnet(self):\n pass",
"def update_endpoint(EndpointName=None, EndpointConfigName=None):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))",
"def delete(ctx, iface, resource_config, **_):\n vpn_connection = ctx.instance.runtime_properties.get('VPN_CONNECTION_ID')\n cider_block = ctx.instance.runtime_properties.get('DESTINATION_CIDR_BLOCK')\n\n params = dict(VpnConnectionId=vpn_connection,\n DestinationCidrBlock=cider_block) \\\n if not resource_config else resource_config.copy()\n iface.delete(params)",
"def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])",
"def delete_internal_interface(self, oid, subnet):\n data = {\"subnet_id\": subnet}\n path = '%s/routers/%s/remove_router_interface' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data),\n token=self.manager.identity.token)\n self.logger.debug('Delete an internal interface from openstack router %s: %s' % \n (oid, truncate(res)))\n return res[0]",
"def cluster_release(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.release_cluster(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster release failed\")",
"def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass",
"def post_subnet_delete(self, resource_id, resource_dict):\n pass",
"async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)",
"def delete_network_object(session, key):\n # type: (Session, Text) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n return _delete(session, url_tail, {CoordConstsV2.QP_KEY: key})",
"def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)",
"def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')",
"def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_delete_cluster_network(self):\n pass",
"def delete_handler(event, context):\n delete_endpoint_config(event)",
"def post_network_ipam_delete(self, resource_id, resource_dict):\n pass",
"def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return",
"def post_virtual_network_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def free_host(ptr):\n cptr = ct.c_void_p(ptr)\n safe_call(backend.get().af_free_host(cptr))",
"def test_delete_namespaced_egress_network_policy(self):\n pass",
"def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def vpp_lb_add_del_vip(node, **kwargs):\n if node[u\"type\"] == NodeType.DUT:\n vip_addr = kwargs.pop(u\"vip_addr\", \"0.0.0.0\")\n protocol = kwargs.pop(u\"protocol\", 255)\n port = kwargs.pop(u\"port\", 0)\n encap = kwargs.pop(u\"encap\", 0)\n dscp = kwargs.pop(u\"dscp\", 0)\n srv_type = kwargs.pop(u\"srv_type\", 0)\n target_port = kwargs.pop(u\"target_port\", 0)\n node_port = kwargs.pop(u\"node_port\", 0)\n new_len = kwargs.pop(u\"new_len\", 1024)\n src_ip_sticky = kwargs.pop(u\"src_ip_sticky\", 0)\n is_del = kwargs.pop(u\"is_del\", 0)\n\n cmd = u\"lb_add_del_vip_v2\"\n err_msg = f\"Failed to add vip on host {node[u'host']}\"\n\n vip_addr = ip_address(vip_addr).packed\n args = dict(\n pfx={\n u\"len\": 128,\n u\"address\": {u\"un\": {u\"ip4\": vip_addr}, u\"af\": 0}\n },\n protocol=protocol,\n port=port,\n encap=htonl(encap),\n dscp=dscp,\n type=srv_type,\n target_port=target_port,\n node_port=node_port,\n new_flows_table_length=int(new_len),\n src_ip_sticky=src_ip_sticky,\n is_del=is_del,\n )\n\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)\n else:\n raise ValueError(\n f\"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'\"\n )",
"def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)",
"def test_esg_gateway_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|dgw_ip', esg_gateway.delete,\n {'gateway': {}},\n # read\n read_args=['routingConfigStatic'],\n read_kwargs={'uri_parameters': {'edgeId': \"esg_id\"}},\n read_response={\n 'status': 204,\n 'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE\n },\n # update\n update_args=['routingConfigStatic'],\n update_kwargs={\n 'uri_parameters': {'edgeId': \"esg_id\"},\n 'request_body_dict': {\n 'staticRouting': {\n 'staticRoutes': {},\n 'defaultRoute': None\n }\n }\n }\n )",
"def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")",
"def test_destroy_nas_share_by_pool(self):\n pass",
"def post_floating_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)",
"def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)",
"def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass",
"def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)",
"def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)",
"def unlink(address):",
"def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)",
"def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)",
"def test_unlink_relink():\n\n topology = \"\"\"\\\n [type=host] hs1\n [type=host] hs2\n [identifier=thelink] hs1:a -- hs2:b\n \"\"\"\n\n mgr = TopologyManager(engine='docker')\n mgr.parse(topology)\n mgr.build()\n\n hs1 = mgr.get('hs1')\n hs2 = mgr.get('hs2')\n\n try:\n\n assert hs1 is not None\n assert hs2 is not None\n\n # Configure IPs\n hs1('ip link set dev a up')\n hs1('ip addr add 10.0.15.1/24 dev a')\n hs2('ip link set dev b up')\n hs2('ip addr add 10.0.15.2/24 dev b')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n # Unlink\n mgr.unlink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert 'Network is unreachable' in ping_result\n\n # Relink\n mgr.relink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n finally:\n mgr.unbuild()",
"def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n 
ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already exists'.format(name)\n return ret",
"def delete_network_profile(arn=None):\n pass",
"def connection(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"connection\", public_id)",
"def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data",
"def test_create_host_subnet(self):\n pass",
"def paths_revoke_network_block(ctx, network, destination, source, port):\n source_block = cloudless.paths.CidrBlock(source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.remove(source_block, destination_service, port)\n click.echo('Removed path from %s to %s in network %s for port %s' % (source, destination,\n network, port))",
"def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n 
openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)",
"def test_patch_namespaced_egress_network_policy(self):\n pass",
"def delete_network_segments(self, tenant_id, network_segments):",
"def _delete_local_endpoint(self, resource, event, trigger, **kwargs):\n router_id = kwargs.get('router_id')\n # delete the local endpoint from the NSX\n local_ep_id = self._search_local_endpint(router_id)\n if local_ep_id:\n self._nsx_vpn.local_endpoint.delete(local_ep_id)\n # delete the neutron port with this IP\n ctx = n_context.get_admin_context()\n port = self._find_vpn_service_port(ctx, router_id)\n if port:\n self.l3_plugin.delete_port(ctx, port['id'], force_delete_vpn=True)",
"def update_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.delete_endpoint(endpoint)\n self.add_endpoint(endpoint)",
"def put_network_object(session, key, data):\n # type: (Session, Text, Any) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n _put_stream(session, url_tail, data, {CoordConstsV2.QP_KEY: key})",
"def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)",
"def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)",
"def test_host_routes_create_two_subnets_then_delete_one(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n self.assertEqual([{'destination': cidrs[0],\n 'nexthop': gateway_ips[1]}],\n sub_res['subnet']['host_routes'])\n\n del_req = self.new_delete_request('subnets', subnet0['id'])\n del_req.get_response(self.api)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n\n self.assertEqual([], sub_res['subnet']['host_routes'])",
"def contract(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"contract\", public_id)",
"def post_physical_router_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def delete_public_ip(self, ip=None):\n raise NotImplementedError",
"def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def test_bgp_neighbour_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|ip|remoteAS|protocolIp|forwardingIp',\n dlr_bgp_neighbour.delete,\n {},\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )",
"def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret",
"def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})",
"def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)"
] |
[
"0.5712464",
"0.5554131",
"0.54412097",
"0.524051",
"0.52084637",
"0.5085088",
"0.50104165",
"0.49779233",
"0.49513933",
"0.49257186",
"0.4916261",
"0.48996267",
"0.48951858",
"0.48708498",
"0.48667976",
"0.48482025",
"0.47911367",
"0.47712603",
"0.47670642",
"0.4749826",
"0.47098243",
"0.46974462",
"0.46822196",
"0.4679282",
"0.46725243",
"0.46538797",
"0.46525046",
"0.4645978",
"0.4635546",
"0.46105352",
"0.46040067",
"0.45984492",
"0.45847303",
"0.45666355",
"0.45643458",
"0.45587006",
"0.45458528",
"0.45452514",
"0.4544383",
"0.45364323",
"0.45319712",
"0.4529994",
"0.45297205",
"0.45243186",
"0.45220402",
"0.4479313",
"0.44636658",
"0.44594795",
"0.4450905",
"0.44440255",
"0.44419134",
"0.44377112",
"0.4426881",
"0.442375",
"0.44170958",
"0.43979308",
"0.43937796",
"0.43844596",
"0.43825072",
"0.43732703",
"0.43649384",
"0.4364843",
"0.4363933",
"0.4361383",
"0.43582892",
"0.43528327",
"0.4342931",
"0.43378478",
"0.43333307",
"0.43324977",
"0.43301854",
"0.43289417",
"0.43268695",
"0.4318821",
"0.43137452",
"0.43133882",
"0.42992085",
"0.42959705",
"0.42947572",
"0.4292423",
"0.42910978",
"0.42840382",
"0.4276379",
"0.4276368",
"0.42695314",
"0.42684135",
"0.42664507",
"0.4265597",
"0.42623448",
"0.4259488",
"0.42533547",
"0.425233",
"0.4251604",
"0.42503193",
"0.42441076",
"0.42440018",
"0.42417163",
"0.42322126",
"0.423011",
"0.4224818"
] |
0.5607081
|
1
|
This operation can be used to release the internal endpoint of a shard or Configserver node in a sharded cluster instance. For more information, see [Release the endpoint of a shard or Configserver node](~~134067~~). To release the public endpoint of a shard or Configserver node in a sharded cluster instance, you can call the [ReleasePublicNetworkAddress](~~67604~~) operation.
|
async def release_node_private_network_address_async(
self,
request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,
) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:
runtime = util_models.RuntimeOptions()
return await self.release_node_private_network_address_with_options_async(request, runtime)
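A minimal usage sketch (not part of this dataset row) for the async wrapper above, assuming the standard alibabacloud_dds20151201 package layout and an already-constructed client; the instance ID, node ID, and NetworkType value are hypothetical placeholders, not values taken from this dataset.

from alibabacloud_dds20151201 import models as dds_20151201_models

async def release_shard_internal_endpoint(client, db_instance_id: str, node_id: str):
    # Field names mirror the query parameters forwarded by the with_options call;
    # all values here are illustrative, not real resources.
    request = dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest(
        dbinstance_id=db_instance_id,   # e.g. 'dds-bp1xxxxxxxx' (hypothetical)
        node_id=node_id,                # shard or Configserver node ID (hypothetical)
        network_type='VPC',             # assumed value; check the API reference
    )
    # Delegates to the async wrapper defined above.
    return await client.release_node_private_network_address_async(request)

# e.g. asyncio.run(release_shard_internal_endpoint(client, 'dds-bp1xxxxxxxx', 'd-bp1xxxxxxxx'))
# from synchronous code; both arguments are placeholders.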
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def release_address(self, public_ip=None, allocation_id=None):\r\n params = {}\r\n\r\n if public_ip is not None:\r\n params['PublicIp'] = public_ip\r\n elif allocation_id is not None:\r\n params['AllocationId'] = allocation_id\r\n\r\n return self.get_status('ReleaseAddress', params, verb='POST')",
"def release_node_private_network_address(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n runtime = util_models.RuntimeOptions()\n return self.release_node_private_network_address_with_options(request, runtime)",
"def release_eip_address(\n public_ip=None, allocation_id=None, region=None, key=None, keyid=None, profile=None\n):\n if not salt.utils.data.exactly_one((public_ip, allocation_id)):\n raise SaltInvocationError(\n \"Exactly one of 'public_ip' OR 'allocation_id' must be provided\"\n )\n\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n return conn.release_address(public_ip, allocation_id)\n except boto.exception.BotoServerError as e:\n log.error(e)\n return False",
"def release_node_private_network_address_with_options(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n self.call_api(params, req, runtime)\n )",
"def disassociate_elastic_ip(ElasticIp=None):\n pass",
"async def release_node_private_network_address_with_options_async(\n self,\n request: dds_20151201_models.ReleaseNodePrivateNetworkAddressRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.network_type):\n query['NetworkType'] = request.network_type\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ReleaseNodePrivateNetworkAddress',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ReleaseNodePrivateNetworkAddressResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def delete_endpoint(EndpointName=None):\n pass",
"def test_delete_host_subnet(self):\n pass",
"def remove_ip(enode, portlbl, addr, shell=None):\n assert portlbl\n assert ip_interface(addr)\n port = enode.ports[portlbl]\n\n cmd = 'ip addr del {addr} dev {port}'.format(addr=addr, port=port)\n response = enode(cmd, shell=shell)\n assert not response",
"def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):",
"def deregister_elastic_ip(ElasticIp=None):\n pass",
"def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def delete_endpoint_config(EndpointConfigName=None):\n pass",
"def test_replace_host_subnet(self):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n LOG.debug(\"release_dhcp_port: %s %s\", network_id, device_id)",
"def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)",
"def test_routing_ip_prefix_uninstall(self):\n self._common_uninstall_external_and_unintialized(\n 'some_id', routing_ip_prefix.delete,\n {'prefix': {}}\n )",
"def delete_endpoint(self):\n logger.warning(f\"Deleting hosting endpoint '{self.endpoint_name}'...\")\n self._realtime_predictor.delete_endpoint()",
"def delete_dev_endpoint(self):\n self.glue_engine.delete_dev_endpoint(EndpointName=self.dev_endpoint_name)",
"def test_deploy_instance_with_new_network(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 252\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr)",
"def remove_gateway(self, network_ref):\n raise NotImplementedError()",
"def post_instance_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete(profile):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"InternetGatewayId\"] = vpc\n return client.delete_internet_gateway(**params)",
"def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))",
"def remove_fixed_ip_from_instance(self, context, instance_id, address):\n args = {'instance_id': instance_id,\n 'address': address}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'remove_fixed_ip_from_instance',\n 'args': args})",
"def disassociate_resolver_endpoint_ip_address(ResolverEndpointId=None, IpAddress=None):\n pass",
"def test_patch_host_subnet(self):\n pass",
"def release(self, floating_ip_id):\n self.client.delete_floatingip(floating_ip_id)",
"def release_floating_ip(self, context, address,\n affect_auto_assigned=False):\n floating_ip = self.db.floating_ip_get_by_address(context, address)\n if floating_ip['fixed_ip']:\n raise exception.ApiError(_('Floating ip is in use. '\n 'Disassociate it before releasing.'))\n if not affect_auto_assigned and floating_ip.get('auto_assigned'):\n return\n # NOTE(vish): We don't know which network host should get the ip\n # when we deallocate, so just send it to any one. This\n # will probably need to move into a network supervisor\n # at some point.\n rpc.cast(context,\n FLAGS.network_topic,\n {'method': 'deallocate_floating_ip',\n 'args': {'floating_address': floating_ip['address']}})",
"def unregister_router(self, hostname):",
"def test_delete_collection_host_subnet(self):\n pass",
"def update_endpoint(EndpointName=None, EndpointConfigName=None):\n pass",
"def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)",
"def remove_ipv4_address(self, net_interface, address):\n self._runner.run('ip addr del %s dev %s' % (address, net_interface))",
"def delete(ctx, iface, resource_config, **_):\n vpn_connection = ctx.instance.runtime_properties.get('VPN_CONNECTION_ID')\n cider_block = ctx.instance.runtime_properties.get('DESTINATION_CIDR_BLOCK')\n\n params = dict(VpnConnectionId=vpn_connection,\n DestinationCidrBlock=cider_block) \\\n if not resource_config else resource_config.copy()\n iface.delete(params)",
"def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])",
"def cluster_release(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.release_cluster(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster release failed\")",
"def delete_internal_interface(self, oid, subnet):\n data = {\"subnet_id\": subnet}\n path = '%s/routers/%s/remove_router_interface' % (self.ver, oid)\n res = self.client.call(path, 'PUT', data=json.dumps(data),\n token=self.manager.identity.token)\n self.logger.debug('Delete an internal interface from openstack router %s: %s' % \n (oid, truncate(res)))\n return res[0]",
"def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass",
"def post_subnet_delete(self, resource_id, resource_dict):\n pass",
"async def deregister_endpoint(self, handle: str) -> None:\n await self.AD.http.deregister_endpoint(handle, self.name)",
"def test_deploy_instance_with_new_network_and_keypair(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_keypair_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 250\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n keypair_name=keypair_name)",
"def delete_network_object(session, key):\n # type: (Session, Text) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n return _delete(session, url_tail, {CoordConstsV2.QP_KEY: key})",
"def disassociate(self, endpoint_name=None, instance_id=None):\n if instance_id is None:\n raise Exception(\"Instance required!\")\n if endpoint_name is None:\n self.request('/v1.1/endpoint/instances/%s' % instance_id, 'DELETE')\n else:\n self.request('/v1.1/endpoints/%s/instances/%s' %\n (endpoint_name, instance_id), 'DELETE')",
"def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass",
"def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))",
"def test_delete_cluster_network(self):\n pass",
"def delete_handler(event, context):\n delete_endpoint_config(event)",
"def post_network_ipam_delete(self, resource_id, resource_dict):\n pass",
"def nic_delete(args):\n name = args.name\n interface = args.interface\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Deleting nic from %s...\" % name)\n k.delete_nic(name, interface)\n return",
"def post_virtual_network_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))",
"def free_host(ptr):\n cptr = ct.c_void_p(ptr)\n safe_call(backend.get().af_free_host(cptr))",
"def test_delete_namespaced_egress_network_policy(self):\n pass",
"def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.add_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app1', 'epg1')\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app1-epg1'))",
"def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)",
"def vpp_lb_add_del_vip(node, **kwargs):\n if node[u\"type\"] == NodeType.DUT:\n vip_addr = kwargs.pop(u\"vip_addr\", \"0.0.0.0\")\n protocol = kwargs.pop(u\"protocol\", 255)\n port = kwargs.pop(u\"port\", 0)\n encap = kwargs.pop(u\"encap\", 0)\n dscp = kwargs.pop(u\"dscp\", 0)\n srv_type = kwargs.pop(u\"srv_type\", 0)\n target_port = kwargs.pop(u\"target_port\", 0)\n node_port = kwargs.pop(u\"node_port\", 0)\n new_len = kwargs.pop(u\"new_len\", 1024)\n src_ip_sticky = kwargs.pop(u\"src_ip_sticky\", 0)\n is_del = kwargs.pop(u\"is_del\", 0)\n\n cmd = u\"lb_add_del_vip_v2\"\n err_msg = f\"Failed to add vip on host {node[u'host']}\"\n\n vip_addr = ip_address(vip_addr).packed\n args = dict(\n pfx={\n u\"len\": 128,\n u\"address\": {u\"un\": {u\"ip4\": vip_addr}, u\"af\": 0}\n },\n protocol=protocol,\n port=port,\n encap=htonl(encap),\n dscp=dscp,\n type=srv_type,\n target_port=target_port,\n node_port=node_port,\n new_flows_table_length=int(new_len),\n src_ip_sticky=src_ip_sticky,\n is_del=is_del,\n )\n\n with PapiSocketExecutor(node) as papi_exec:\n papi_exec.add(cmd, **args).get_reply(err_msg)\n else:\n raise ValueError(\n f\"Node {node[u'host']} has unknown NodeType: '{node[u'type']}'\"\n )",
"def subnet_update(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.update_subnet(**kwargs)",
"def test_esg_gateway_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|dgw_ip', esg_gateway.delete,\n {'gateway': {}},\n # read\n read_args=['routingConfigStatic'],\n read_kwargs={'uri_parameters': {'edgeId': \"esg_id\"}},\n read_response={\n 'status': 204,\n 'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE\n },\n # update\n update_args=['routingConfigStatic'],\n update_kwargs={\n 'uri_parameters': {'edgeId': \"esg_id\"},\n 'request_body_dict': {\n 'staticRouting': {\n 'staticRoutes': {},\n 'defaultRoute': None\n }\n }\n }\n )",
"def test_destroy_nas_share_by_pool(self):\n pass",
"def sgup(sg=\"sg_external_ssh\"):\n ip = os.popen(\"/usr/bin/curl ifconfig.co 2>/dev/null\").readline().strip()\n print(\"My Public IP is : \"+ip)\n client = boto3.client(\"ec2\")\n ippermissions = client.describe_security_groups(GroupNames = [ sg ])[\"SecurityGroups\"][0][\"IpPermissions\"]\n print(\"Revoking old IP from group \"+sg)\n client.revoke_security_group_ingress(GroupName = sg, IpPermissions = ippermissions)\n printr(\"Adding new IP to group \"+sg)\n client.authorize_security_group_ingress(GroupName=sg, IpProtocol=\"-1\", FromPort=0, ToPort=0, CidrIp=ip+\"/32\")",
"def post_floating_ip_delete(self, resource_id, resource_dict):\n pass",
"def delete_network(session, name):\n # type: (Session, Text) -> None\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}/{name}\"\n return _delete(session, url_tail)",
"def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)",
"def post_virtual_ip_delete(self, resource_id, resource_dict):\n pass",
"def test_deploy_instance_with_networks_and_e2e_connection_using_public_ip(self):\n\n # skip test if suite couldn't start from an empty, clean list of allocated IPs (to avoid cascading failures)\n if self.suite_world['allocated_ips']:\n self.skipTest(\"There were pre-existing, not deallocated IPs\")\n\n # Allocate an IP\n allocated_ip = self.__allocate_ip_test_helper__()\n\n # Create Keypair\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n private_keypair_value = self.__create_keypair_test_helper__(keypair_name)\n\n # Create Router with an external network gateway\n router_name = TEST_ROUTER_PREFIX + \"_e2e_\" + suffix\n external_network_id = self.__get_external_network_test_helper__()\n router_id = self.__create_router_test_helper__(router_name, external_network_id)\n\n # Create Network\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 246\n network_id, subnet_id = self.__create_network_and_subnet_test_helper__(network_name, network_cidr)\n\n # Add interface to router\n port_id = self.neutron_operations.add_interface_router(router_id, subnet_id)\n self.test_world['ports'].append(port_id)\n\n # Deploy VM (it will have only one IP from the Public Pool)\n instance_name = TEST_SERVER_PREFIX + \"_e2e_\" + suffix\n keypair_name = TEST_KEYPAIR_PREFIX + \"_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n server_id = self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name, is_network_new=False,\n keypair_name=keypair_name, is_keypair_new=False,\n sec_group_name=sec_group_name)\n\n # Associate the public IP to Server\n self.nova_operations.add_floating_ip_to_instance(server_id=server_id, ip_address=allocated_ip)\n\n # SSH Connection\n self.__ssh_connection_test_helper__(host=allocated_ip, private_key=private_keypair_value)",
"def remove_node(self, node):\n\t\tnode.close()\n\t\taddress = (node.server_ip, node.server_port)\n\t\tself.nodes.pop(address)",
"def unlink(address):",
"def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)",
"def subnet_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_subnet(**kwargs)",
"def test_unlink_relink():\n\n topology = \"\"\"\\\n [type=host] hs1\n [type=host] hs2\n [identifier=thelink] hs1:a -- hs2:b\n \"\"\"\n\n mgr = TopologyManager(engine='docker')\n mgr.parse(topology)\n mgr.build()\n\n hs1 = mgr.get('hs1')\n hs2 = mgr.get('hs2')\n\n try:\n\n assert hs1 is not None\n assert hs2 is not None\n\n # Configure IPs\n hs1('ip link set dev a up')\n hs1('ip addr add 10.0.15.1/24 dev a')\n hs2('ip link set dev b up')\n hs2('ip addr add 10.0.15.2/24 dev b')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n # Unlink\n mgr.unlink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert 'Network is unreachable' in ping_result\n\n # Relink\n mgr.relink('thelink')\n\n # Test connection\n ping_result = hs1('ping -c 1 10.0.15.2')\n assert '1 packets transmitted, 1 received' in ping_result\n\n finally:\n mgr.unbuild()",
"def mac_pool_remove(handle, name, parent_dn=\"org-root\"):\r\n dn = parent_dn + '/mac-pool-' + name\r\n mo = handle.query_dn(dn)\r\n if mo:\r\n handle.remove_mo(mo)\r\n handle.commit()\r\n else:\r\n raise ValueError(\"MAC Pool is not available\")",
"def _delete_router_port(self, method, api, header, data):\n self._execute_api(method, api, header, data)",
"def endpoint_present(\n name,\n publicurl=None,\n internalurl=None,\n adminurl=None,\n region=None,\n profile=None,\n url=None,\n interface=None,\n **connection_args\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n _api_version(profile=profile, **connection_args)\n\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n\n def _changes(desc):\n return ret.get(\"comment\", \"\") + desc + \"\\n\"\n\n def _create_endpoint():\n if _OS_IDENTITY_API_VERSION > 2:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n url=url,\n interface=interface,\n profile=profile,\n **connection_args\n )\n else:\n ret[\"changes\"] = __salt__[\"keystone.endpoint_create\"](\n name,\n region=region,\n publicurl=publicurl,\n adminurl=adminurl,\n internalurl=internalurl,\n profile=profile,\n **connection_args\n )\n\n if endpoint and \"Error\" not in endpoint and endpoint.get(\"region\") == region:\n\n if _OS_IDENTITY_API_VERSION > 2:\n\n change_url = False\n change_interface = False\n\n if endpoint.get(\"url\", None) != url:\n ret[\"comment\"] = _changes(\n 'URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"url\", None), url\n )\n )\n change_url = True\n\n if endpoint.get(\"interface\", None) != interface:\n ret[\"comment\"] = _changes(\n 'Interface changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"interface\", None), interface\n )\n )\n change_interface = True\n\n if __opts__.get(\"test\") and (change_url or change_interface):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n return ret\n\n if change_url:\n ret[\"changes\"][\"url\"] = url\n\n if change_interface:\n ret[\"changes\"][\"interface\"] = interface\n\n else:\n change_publicurl = False\n change_adminurl = False\n change_internalurl = False\n\n if endpoint.get(\"publicurl\", None) != publicurl:\n change_publicurl = True\n\n ret[\"comment\"] = _changes(\n 'Public URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"publicurl\", None), publicurl\n )\n )\n\n if endpoint.get(\"adminurl\", None) != adminurl:\n change_adminurl = True\n ret[\"comment\"] = _changes(\n 'Admin URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"adminurl\", None), adminurl\n )\n )\n\n if endpoint.get(\"internalurl\", None) != internalurl:\n change_internalurl = True\n ret[\"comment\"] = _changes(\n 'Internal URL changes from \"{}\" to \"{}\"'.format(\n endpoint.get(\"internalurl\", None), internalurl\n )\n )\n\n if __opts__.get(\"test\") and (\n change_publicurl or change_adminurl or change_internalurl\n ):\n ret[\"result\"] = None\n ret[\"comment\"] += 'Endpoint for service \"{}\" will be updated'.format(\n name\n )\n ret[\"changes\"][\"Endpoint\"] = \"Will be updated\"\n return ret\n\n if change_publicurl:\n ret[\"changes\"][\"publicurl\"] = publicurl\n\n if change_adminurl:\n ret[\"changes\"][\"adminurl\"] = adminurl\n\n if change_internalurl:\n ret[\"changes\"][\"internalurl\"] = internalurl\n\n if ret[\"comment\"]: # changed\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n _create_endpoint()\n ret[\"comment\"] += 'Endpoint for service \"{}\" has been updated'.format(name)\n\n else:\n # Add new endpoint\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"changes\"][\"Endpoint\"] = \"Will be created\"\n 
ret[\"comment\"] = 'Endpoint for service \"{}\" will be added'.format(name)\n return ret\n _create_endpoint()\n ret[\"comment\"] = 'Endpoint for service \"{}\" has been added'.format(name)\n\n if ret[\"comment\"] == \"\": # => no changes\n ret[\"comment\"] = 'Endpoint for service \"{}\" already exists'.format(name)\n return ret",
"def delete(self, oid):\n path = '%s/networks/%s' % (self.ver, oid)\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack network: %s' % truncate(res))\n return res[0]",
"def delete_network_profile(arn=None):\n pass",
"def connection(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"connection\", public_id)",
"def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data",
"def paths_revoke_network_block(ctx, network, destination, source, port):\n source_block = cloudless.paths.CidrBlock(source)\n destination_service = get_service_for_cli(ctx, network, destination)\n ctx.obj['CLIENT'].paths.remove(source_block, destination_service, port)\n click.echo('Removed path from %s to %s in network %s for port %s' % (source, destination,\n network, port))",
"def test_create_host_subnet(self):\n pass",
"def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n 
openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)",
"def delete_network_segments(self, tenant_id, network_segments):",
"def test_patch_namespaced_egress_network_policy(self):\n pass",
"def _delete_local_endpoint(self, resource, event, trigger, **kwargs):\n router_id = kwargs.get('router_id')\n # delete the local endpoint from the NSX\n local_ep_id = self._search_local_endpint(router_id)\n if local_ep_id:\n self._nsx_vpn.local_endpoint.delete(local_ep_id)\n # delete the neutron port with this IP\n ctx = n_context.get_admin_context()\n port = self._find_vpn_service_port(ctx, router_id)\n if port:\n self.l3_plugin.delete_port(ctx, port['id'], force_delete_vpn=True)",
"def update_endpoint(self, endpoint):\n exists = self.get_endpoint(endpoint)\n if exists:\n self.delete_endpoint(endpoint)\n self.add_endpoint(endpoint)",
"def put_network_object(session, key, data):\n # type: (Session, Text, Any) -> None\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_OBJECTS\n )\n _put_stream(session, url_tail, data, {CoordConstsV2.QP_KEY: key})",
"def post_delete_subnet(self, sender, instance, **kwargs):\n RecurseNetworks.delete_entries(subnet=str(instance.ip_network), net_name=instance.name)",
"def test_deploy_instance_with_new_network_and_sec_group(self):\n suffix = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n instance_name = TEST_SERVER_PREFIX + \"_network_sec_group_\" + suffix\n sec_group_name = TEST_SEC_GROUP_PREFIX + \"_\" + suffix\n network_name = TEST_NETWORK_PREFIX + \"_\" + suffix\n network_cidr = TEST_CIDR_PATTERN % 249\n self.__deploy_instance_helper__(instance_name=instance_name,\n network_name=network_name,\n network_cidr=network_cidr,\n sec_group_name=sec_group_name)",
"def test_host_routes_create_two_subnets_then_delete_one(self):\n gateway_ips = ['10.0.1.1', '10.0.2.1']\n cidrs = ['10.0.1.0/24', '10.0.2.0/24']\n net, subnet0, subnet1 = self._create_subnets_segments(gateway_ips,\n cidrs)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n self.assertEqual([{'destination': cidrs[0],\n 'nexthop': gateway_ips[1]}],\n sub_res['subnet']['host_routes'])\n\n del_req = self.new_delete_request('subnets', subnet0['id'])\n del_req.get_response(self.api)\n\n sh_req = self.new_show_request('subnets', subnet1['id'])\n raw_res = sh_req.get_response(self.api)\n sub_res = self.deserialize(self.fmt, raw_res)\n\n self.assertEqual([], sub_res['subnet']['host_routes'])",
"def contract(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"contract\", public_id)",
"def post_physical_router_delete(self, resource_id, resource_dict):\n pass",
"def test_basic_remove_endpoint(self):\n args = self.get_args()\n config = self.create_config_file()\n config['config'].append(self.create_export_policy())\n self.write_config_file(config, args)\n\n collector = execute_tool(args, test_mode=True)\n\n time.sleep(2)\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))\n\n config = self.create_config_file()\n self.write_config_file(config, args)\n collector.reload_config()\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg'))",
"def delete_public_ip(self, ip=None):\n raise NotImplementedError",
"def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True",
"def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)",
"def test_bgp_neighbour_uninstall(self):\n self._common_uninstall_read_update(\n 'esg_id|ip|remoteAS|protocolIp|forwardingIp',\n dlr_bgp_neighbour.delete,\n {},\n read_args=['routingBGP'],\n read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},\n read_response={\n 'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,\n 'status': 204\n },\n update_args=['routingBGP'],\n update_kwargs={\n 'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,\n 'uri_parameters': {'edgeId': 'esg_id'}\n }\n )",
"def net_undefine(network, server, virt=\"Xen\"):\n\n cmd = \"virsh -c %s net-undefine %s 2>/dev/null\" % (virt2uri(virt), network)\n ret, out = run_remote(server, cmd)\n\n return ret",
"def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})",
"def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)"
] |
[
"0.57125086",
"0.5607883",
"0.55533296",
"0.54408795",
"0.52399385",
"0.52079135",
"0.50849366",
"0.5009823",
"0.49770507",
"0.49518353",
"0.4925341",
"0.49161455",
"0.48942116",
"0.48699126",
"0.48663092",
"0.4848325",
"0.47907278",
"0.47718734",
"0.476847",
"0.47496375",
"0.47113106",
"0.46978343",
"0.468314",
"0.4680233",
"0.4671128",
"0.46518168",
"0.46516562",
"0.46461722",
"0.4634961",
"0.4610559",
"0.46033147",
"0.45978075",
"0.4584389",
"0.45652565",
"0.45638222",
"0.45591933",
"0.45456555",
"0.45454818",
"0.4543817",
"0.45371902",
"0.45316735",
"0.45296323",
"0.45295498",
"0.4523949",
"0.45218635",
"0.4478802",
"0.44636032",
"0.44592598",
"0.44509578",
"0.4443601",
"0.44419894",
"0.44371194",
"0.44259924",
"0.44239137",
"0.44165206",
"0.4397404",
"0.43936202",
"0.43848205",
"0.43825224",
"0.43744308",
"0.4365065",
"0.43645218",
"0.43639407",
"0.4360882",
"0.4358993",
"0.43532097",
"0.43426195",
"0.43376708",
"0.4332163",
"0.4331317",
"0.43297693",
"0.4327675",
"0.4325728",
"0.43199134",
"0.43132678",
"0.43130794",
"0.42993045",
"0.42962766",
"0.4294665",
"0.4292479",
"0.42917922",
"0.42850015",
"0.42770198",
"0.42765862",
"0.4270826",
"0.42686638",
"0.42667192",
"0.4266613",
"0.42623007",
"0.4259859",
"0.42533687",
"0.42530146",
"0.42514378",
"0.42502618",
"0.42437944",
"0.42425564",
"0.4241606",
"0.42326367",
"0.4230244",
"0.42251906"
] |
0.49001652
|
12
|
> This operation can reset only the password of the root account of an instance.
|
def reset_account_password_with_options(
self,
request: dds_20151201_models.ResetAccountPasswordRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ResetAccountPasswordResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.account_name):
query['AccountName'] = request.account_name
if not UtilClient.is_unset(request.account_password):
query['AccountPassword'] = request.account_password
if not UtilClient.is_unset(request.character_type):
query['CharacterType'] = request.character_type
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='ResetAccountPassword',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.ResetAccountPasswordResponse(),
self.call_api(params, req, runtime)
)
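A hedged sketch of how the method above might be invoked; the client is assumed to be already constructed, and the instance ID and password are placeholders rather than real credentials.

# Illustrative only: replace the placeholders with real values before use.
request = dds_20151201_models.ResetAccountPasswordRequest(
    dbinstance_id='dds-bp1xxxxxxxx',    # hypothetical instance ID
    account_name='root',                # per the note above, only the root account can be reset
    account_password='NewPassw0rd!',    # placeholder; assumed to need to meet the service's password rules
)
response = client.reset_account_password_with_options(request, util_models.RuntimeOptions())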
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset_password():\n pass",
"def resetPassword(self, customerguid, password, jobguid=\"\", executionparams=None):",
"def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)",
"def reset_password(cursor: Cursor, owner: Owner) -> Result:\n return pgsql.reset_password(cursor, owner_name(owner))",
"def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)",
"def reset_token(sender, instance, **kwargs):\n new_password = instance.password\n\n try:\n old_password = User.objects.get(pk=instance.pk).password\n except User.DoesNotExist:\n old_password = None\n\n if new_password != old_password:\n Token.objects.filter(user=instance).delete()",
"def change_root_password(self, *args, **kwargs):\r\n return execute(self._change_root_password, *args, **kwargs)",
"def LdapResetPassword(self, record):\n password = self.login_pwd.generate_password()\n attrs = {}\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n logger.debug(\"LDAP LdapResetPassword encrypt_password %s\"\n % (attrs['userPassword']))\n result = self.LdapModifyUser(record, attrs)\n return result",
"def resetUser(self):\n\t\turl = \"https://habitica.com/api/v4/user/reset\"\n\t\treturn(postUrl(url, self.credentials))",
"def reset_ldap_password(username):\n \n from django_ldap_pixiedust.user import SynchronisingUserAdapter\n backend = LDAPBackend()\n user = User.objects.get(username=username)\n ldap_user = backend.get_user(user.id)\n sync = SynchronisingUserAdapter(ldap_user)\n sync.reset_ldap_password()",
"def request_password_reset():",
"def reset_password(connection,password,username):\r\n with connection:\r\n connection.execute(RESET_PASSWORD,(password,username))",
"def reset(email, password):\n try:\n pwd = hash_password(password)\n u = User.query.filter(User.email == email).first()\n u.password = pwd\n try:\n db.session.commit()\n print('User password has been reset successfully.')\n except:\n db.session.rollback()\n except Exception as e:\n print('Error resetting user password: %s' % e)",
"def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1",
"def reset(name, runas=None):\n return prlctl(\"reset\", salt.utils.data.decode(name), runas=runas)",
"def reset_password(newpass, challenge):",
"def reset_account_password(\n self,\n request: dds_20151201_models.ResetAccountPasswordRequest,\n ) -> dds_20151201_models.ResetAccountPasswordResponse:\n runtime = util_models.RuntimeOptions()\n return self.reset_account_password_with_options(request, runtime)",
"def password_reset(self, password, vtoken, welcomeEmailTemplate = ''):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&vtoken=' + vtoken\n payload = {'password': password}\n url = SECURE_API_URL + \"raas/v1/account/password/reset\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)",
"def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"async def reset_password(\n rb_client: LDAPConnection, smtp_client: RBMail, commit: bool, username: str,\n) -> int:\n async with rb_client.connect() as conn:\n password = generate_passwd(12)\n if commit:\n await conn.modify(\n f\"uid={username},ou=accounts,o=redbrick\",\n {\"userPassword\": [(MODIFY_REPLACE, [password])]},\n )\n results = await conn.search(\n \"ou=accounts,o=redbrick\",\n f\"(|(uid={username})(gecos={username}))\",\n attributes=[\"altmail\"],\n )\n async with smtp_client:\n await smtp_client.send_password_reset(\n results[0][\"attributes\"][\"altmail\"][0], username, password\n )\n print(f\"{username} password has been reset\")\n return 0",
"def reset_password():\r\n key = request.args.get('key')\r\n if key is None:\r\n abort(403)\r\n userdict = {}\r\n try:\r\n userdict = signer.signer.loads(key, max_age=3600, salt='password-reset')\r\n except BadData:\r\n abort(403)\r\n username = userdict.get('user')\r\n if not username or not userdict.get('password'):\r\n abort(403)\r\n user = model.user.User.query.filter_by(name=username).first_or_404()\r\n if user.passwd_hash != userdict.get('password'):\r\n abort(403)\r\n form = ChangePasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user.set_password(form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n login_user(user)\r\n flash(gettext('You reset your password successfully!'), 'success')\r\n return redirect(url_for('.signin'))\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/password_reset.html', form=form)",
"def resetPassword(self, email):\n\t\turl = \"https://habitica.com/api/v3/user/auth/reset-password\"\n\t\tpayload ={\"email\": email}\n\t\treturn(postUrl(url, self.credentials, payload))",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"",
"def reset_password(self, new_password=None):\n if not new_password:\n new_password = pwgen(10)\n pg = postgresql_svc.PostgreSql()\n if pg.master_user.exists():\n pg.master_user.change_role_password(new_password)\n pg.master_user.change_system_password(new_password)\n else:\n pg.create_linux_user(pg.master_user.name, new_password)\n pg.create_pg_role(pg.master_user.name,\n new_password,\n super=True,\n force=False)\n return new_password",
"def setpassword(self, pwd):\n pass",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))",
"def change_password(change_account):\n change_data(change_account, changed_data='password')",
"def test_patient_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/patient/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))",
"def reboot(self, node):",
"def set_admin_password(self, instance, new_pass):\n pass",
"def reset_password():\n body = request.get_json()\n reset_token = body.get('reset_token')\n password = body.get('password')\n\n if not reset_token or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n user_email = decode_token(reset_token)['identity']\n is_changed = views.UserManagement().change_password(email=user_email, password=password)\n if not is_changed:\n return jsonify(msg.NO_DATA), 404\n\n send_email('[Shodita] Password reset successful', sender='shodita@shodita.com', recipients=[user_email],\n text_body='Password reset was successful', html_body='<p>Password reset was successful</p>')\n\n return jsonify(msg.SUCCESS), 200",
"def test_api_user_reset_password_post(self):\n pass",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def test_reset_user(self):\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n data = {\"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def change_password(self, reset_token, new_password_hash):\n try:\n self.logger.debug('change_password running')\n nosqldb = self.pers.nosql_db\n reset_request = nosqldb['passwordResets'].find_one(\n {'resetToken': reset_token}\n )\n\n if reset_request:\n self.logger.debug('reset request match')\n nosqldb['users'].update_one(\n {\n 'username': reset_request['username']\n },\n {\n '$set': {'passwdHash': new_password_hash}\n }\n\n\n )\n else:\n self.logger.debug('reset request mismatch, nothing changed')\n except Exception as exc:\n self.logger.debug('Unexpected Error %s', str(exc))\n raise",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def enable_root_user(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_post(uri)\n return body[\"user\"][\"password\"]",
"def test_password_reset(client, models):\n user = models[\"user\"][0]\n encoded_token = user.get_reset_token()\n new_password = \"password\"\n response = client.post(\n f\"/password/reset/{encoded_token}\", json={\"password\": new_password}\n )\n assert response.status_code == 200\n assert user.check_password(new_password)",
"def restart_salt():\n stop_salt()\n start_salt()",
"def reboot_instance(InstanceId=None):\n pass",
"def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))",
"def reset_merchant_pass(self, newpass):\n self.refresh()\n if not newpass:\n raise ValueError(\"Password must be defined\")\n\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MerchantPassword': newpass,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()",
"def reset_ilo_credential(self, password):\n acc_uri = '/rest/v1/AccountService/Accounts'\n\n for status, hds, account, memberuri in self._get_collection(acc_uri):\n if account['UserName'] == self.login:\n mod_user = {}\n mod_user['Password'] = password\n status, headers, response = self._rest_patch(memberuri,\n None, mod_user)\n if status != 200:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n return\n\n msg = \"iLO Account with specified username is not found.\"\n raise exception.IloError(msg)",
"def reset_pass(key):\n form = NewPasswordForm()\n form.key.data = key\n\n if form.validate_on_submit():\n form.user.set_password(form.password.data)\n db.session.delete(form.pw_reset)\n db.session.commit()\n\n flash('Your password has been successfully reset', 'alert-success')\n login_user(form.user)\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n form.key.data = key\n # NOTE: This render_template is causing a 404\n return render_template('reset_pass.html', form=form, key=key)",
"def test_reset_passwd_bad_token(self, test_client):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=str(\n create_access_token(identity=UserModel(uuid=uuid.uuid4()))),\n password=\"Azerty!123\"))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['status'] == False",
"def unconfigure_service_password_encryption(device):\n\n try:\n device.configure(\"no service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not unconfigure service password encryption\"\n )",
"def reset_user_password_service(user: User, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user.password = hashed_password\n db.session.commit()",
"def ChangePasswordWithRPToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def test_dietitian_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/dietitian/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reset(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.ResetVM_Task())",
"def reset_server_database_password(self, server_id, database_id):\n response = self._api_request(\n endpoint='application/servers/{}/databases/{}'\n '/reset-password'.format(server_id, database_id),\n mode='POST')\n return response",
"def test_reset_password(self):\n self.register()\n response = self.client.put(self.create_url(\"chegemaggie1@gmail.com\"),\n self.test_update_password_data,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_account_modification_superuser_wrong_pw(flask_server, create_account):\n import requests\n\n config = flask_server\n data = {\n 'superuserpassword': '123',\n 'name': create_account['name'],\n 'new_name': 'foo2',\n 'new_password': 'bar2',\n 'new_code': '456',\n }\n\n req = requests.post('{}/account/modify'.format(API_URL), data=data)\n assert req.content == b'Wrong superuserpassword'\n assert req.status_code == 400",
"def test_password_modify_extop(self):\n user_dn = LDAPDN(\"cn=skip,ou=nerdherd,dc=bonsai,dc=test\")\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"p@ssword\"))\n conn = cli.connect()\n self.assertRaises(TypeError,\n lambda: conn.modify_password(new_password=0))\n conn.modify_password(user_dn, \"newpassword\", \"p@ssword\")\n conn.close()\n self.assertRaises(ClosedConnection, conn.modify_password)\n try:\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"newpassword\"))\n cli.set_password_policy(True)\n conn, ctrl = cli.connect()\n newpass = conn.modify_password()\n conn.close()\n self.assertIsInstance(newpass, str)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), newpass))\n conn, ctrl = cli.connect()\n conn.close()\n except bonsai.AuthenticationError:\n self.fail(\"Failed to authenticate with the new password.\")\n finally:\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"userPassword\"])[0]\n entry['userPassword'] = \"p@ssword\"\n entry.modify()\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"pwdChangeTime\",\n \"pwdGraceUseTime\"])[0]\n if (\"pwdChangeTime\", \"pwdGraceUseTime\") in entry.keys():\n del entry['pwdChangeTime']\n del entry['pwdGraceUseTime']\n entry.modify()",
"def reset_password(token_value, password, confirm_password):\n user = api.token.find_key_by_token(\"password_reset\", token_value)\n if user is None:\n raise PicoException(\"Invalid password reset token\", 422)\n api.user.update_password_request(\n {\"new-password\": password, \"new-password-confirmation\": confirm_password},\n uid=user[\"uid\"],\n )\n\n api.token.delete_token({\"uid\": user[\"uid\"]}, \"password_reset\")",
"def test_reset(u_boot_console):\n\n u_boot_console.run_command('reset', wait_for_prompt=False)\n assert(u_boot_console.validate_exited())",
"def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )",
"def wifi_password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def account_api_password_reset(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_reset.html')\n profile = request.user\n modified = profile.has_usable_api_password()\n api_password = User.objects.make_random_password(settings.API_PASSWORD_LENGTH, settings.API_PASSWORD_CHARACTERS)\n profile.set_api_password(api_password)\n profile.save()\n profile.log_change(request.user, \"Generated new api password.\")\n return render(request, 'agda/account/api_password_reset.html', dict(api_password=api_password, modified=modified))",
"def test_aws_service_api_vm_password_get(self):\n pass",
"def test_replacePasswordWrong(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n d = account.replacePassword(u'blahblah', u'blah')\n perform()\n perform()\n self.failureResultOf(d, errors.BadCredentials)",
"def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()",
"def print_password_change_required_and_logout( context, args ):\n\n print( \"Password change required. To set a new password, run the following:\" )\n print( \"rf_accounts.py -r {} -u {} -p <old password> --setpassword {} <new password>\".format( args.rhost, args.user, args.user ) )\n logout( context, ignore_error = True ) # Some services do not allow session logout in this condition\n return",
"def reset_password():\n json_data = request.get_json()\n user_email = json_data.get('email') or None\n\n if user_email is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n user_account = db.session.query(UserAccount).filter(\n UserAccount.email == user_email).first()\n if user_account is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n # Generate password hash\n temp_password = str(random.randint(10000,99999))\n update_user = {'password_hashed': get_hashed_password(temp_password)}\n user_account.update(**update_user)\n user_account.save()\n\n email.send('reset_password', user_email, temp_password)\n\n return {'status_code': 200, 'message': 'Password reset success!'}",
"def disable_root_login():\n sudo('passwd --lock root')",
"def test_reset_user_username_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"test_user\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n # NOTE(amelia): Requiring both username and email here may be\n # a slight issue for various UIs as typically a\n # forgotten password screen only asks for the\n # email address, however there isn't a very\n # good way to address this as keystone doesn't\n # store emails in their own field\n # Currently this is an issue for the forked adjutant\n # horizon\n data = {\"email\": \"test@example.com\", \"username\": \"test_user\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Password Reset for OpenStack\")\n self.assertEqual(mail.outbox[0].to[0], \"test@example.com\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def test_forced_password_change(self):\r\n\r\n student_email, student_password = self._setup_user()\r\n staff_email, staff_password = self._setup_user(is_staff=True)\r\n\r\n self._login(student_email, student_password)\r\n self._login(staff_email, staff_password)\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=1)\r\n with freeze_time(staff_reset_time):\r\n self._login(student_email, student_password)\r\n\r\n # staff should fail because password expired\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n\r\n # if we reset the password, we should be able to log in\r\n self._update_password(staff_email, \"updated\")\r\n self._login(staff_email, \"updated\")\r\n\r\n student_reset_time = timezone.now() + timedelta(days=5)\r\n with freeze_time(student_reset_time):\r\n # Both staff and student logins should fail because user must\r\n # reset the password\r\n\r\n self._login(student_email, student_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(student_email, \"updated\")\r\n self._login(student_email, \"updated\")\r\n\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(staff_email, \"updated2\")\r\n self._login(staff_email, \"updated2\")",
"def set_password(self, password):\n self.cloudserver.change_password(password)",
"def root_password(self) -> str:\n return pulumi.get(self, \"root_password\")",
"def test_reset_password_fails_for_similar_passwords(self):\n self.test_client.post(\n \"/api/v1/auth/register\", data=self.user_data)\n\n resp = self.test_client.post(\n \"/api/v1/auth/login\",\n data=self.user_data)\n data = json.loads(resp.data)\n\n # reset-password should pass provided the new password\n # is not similar to the old saved password\n token = data['token']\n resp = self.test_client.post(\n \"/api/v1/auth/reset-password\",\n headers=dict(Authorization=f\"Bearer {token}\"),\n data={'password': '!0ctoPus', 'confirm password': '!0ctoPus'}\n )\n\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(data[\"status\"], \"failure\")\n self.assertEqual(\n data[\"message\"],\n \"Your new password should not be similar to your old password\")",
"def change_password(reset_code):\n return dict(reset_code=reset_code)",
"def test_set_user_password(self):\n pass",
"def reset_password(token, new_password):\r\n\r\n\t\tuser = AuthTools.get_user_from_token(token, AuthTools.password_salt)\r\n\r\n\t\tif user is not None:\r\n\t\t\tuser.set_password(new_password)\r\n\t\t\tuser.save()\r\n\t\t\treturn user\r\n\r\n\t\treturn None",
"def resetVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/reset\" % (node,vmid), post_data)\n return data",
"def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))",
"def test_setPassword(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n username = u'%s@%s' % (self.localpart, self.domain)\n d = account.setPassword(u'blahblah')\n perform()\n self.successResultOf(d)\n d = self._requestAvatarId(UsernamePassword(username, u'blahblah'))\n perform()\n self.assertEquals(self.successResultOf(d), account.storeID)\n d = self._requestAvatarId(UsernamePassword(username, self.password))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)\n d = self._requestAvatarId(UsernamePassword(username, account.passwordHash))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)",
"def reset_password(self, login):\n users = self.search([('login', '=', login)])\n if not users:\n users = self.search([('email', '=', login)])\n if len(users) != 1 or (users.state != 'active' or users.approval_status == 'rejected'):\n raise Exception(_('Reset password: invalid username or email'))\n return users.action_reset_password()",
"def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()",
"def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")",
"def password(self, oid):\n path = '/users/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack user: %s' % truncate(res))\n return res[0]['server']",
"def resetSecret(self):\n self.secret = str(uuid())\n self.put()",
"def reset_password(token, new_password):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n user = User.query.get(data.get(\"reset\"))\n if user is None:\n return False\n user.password = new_password\n db.session.add(user)\n return True",
"async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}",
"def test_user_resetpassword(self):\n\n data = {\n \"email\": \"testuser@gmail.com\"\n }\n response = self.client.post(reverse('account:request-reset-email'), data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(email=\"testuser@gmail.com\")\n uidb64 = urlsafe_base64_encode(smart_bytes(user.id))\n token = PasswordResetTokenGenerator().make_token(user)\n relativeLink = reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n )\n resetconfirm_response = self.client.get(reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n ))\n\n self.assertEqual(resetconfirm_response.status_code, status.HTTP_200_OK)\n reset_data = {\n \"uidb64\": resetconfirm_response.data[\"uidb64\"],\n \"token\": resetconfirm_response.data[\"token\"],\n \"password\": \"1234567\"\n }\n resetcomplete_response = self.client.patch(reverse('account:password-reset-complete'), reset_data,\n format=\"json\")\n self.assertEqual(resetcomplete_response.status_code, status.HTTP_200_OK)\n # test if now user can login with new password\n updatedlogin_data = {\n \"email\": \"testuser@gmail.com\",\n \"password\": \"1234567\"\n }\n\n response = self.client.post(self.login_url, updatedlogin_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test if user can login with old password\n response = self.client.post(self.login_url, self.login_data, format=\"json\")\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)",
"def reset_chroot(self):\n try:\n if self.HAS_CHROOT:\n task = reset_ldap_users.post()\n MonQTask.wait_for_tasks(query={\n '_id': task._id, 'state': {'$in': ['ready', 'busy']}\n }, timeout=120000)\n except Exception, e:\n print \"Exception reseting chroot home folders.\"\n raise",
"def reset_password(self, old_password, new_password):\n verb = \"POST\"\n url = urljoiner(self.baseurl, [self.path, \"$me\", \"password\"])\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = {\"oldPassword\": old_password,\n \"newPassword\": new_password}\n\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.post(url, data=data, headers=headers)\n self.handle_error_message(r)\n print(\"password successfully reset!\")\n self.auth_data['password'] = new_password\n try:\n self.login()\n except Exception as e:\n pass",
"def reset_password():\n password = request.get_json().get('password')\n access_token = authentication_request()\n\n if access_token:\n # Attempt to decode the token and get the User ID\n user_id = Users.decode_token(access_token)\n if not isinstance(user_id, str):\n user = Users.query.filter_by(id=user_id).first()\n try:\n if not user:\n raise exceptions.NotFound()\n\n valid_password = check_password_validation(password)\n user.password = Users.hash_password(valid_password)\n user.save()\n # db.session.commit()\n return {\"message\": \"you have succesfuly reset your password\"}, status.HTTP_200_OK\n \n except Exception as error:\n \n return {\"message\": str(error)}, status.HTTP_200_OK\n \n else:\n return {\"message\": user_id}, status.HTTP_401_UNAUTHORIZED\n\n return {\"message\": \"Provide a valid authentication token\"}, status.HTTP_403_FORBIDDEN",
"def test_57_reset_api_key(self):\r\n url = \"/account/johndoe/update\"\r\n # Anonymous user\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Anonymous user should be redirected for authentication\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n res = self.app.post(url, follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Authenticated user\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n url = \"/account/%s/update\" % user.name\r\n api_key = user.api_key\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should get access to reset api key page\"\r\n assert res.status_code == 200, err_msg\r\n assert \"reset your personal API Key\" in res.data, err_msg\r\n url = \"/account/%s/resetapikey\" % user.name\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should be able to reset his api key\"\r\n assert res.status_code == 200, err_msg\r\n user = db.session.query(User).get(1)\r\n err_msg = \"New generated API key should be different from old one\"\r\n assert api_key != user.api_key, err_msg\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n res = self.app.post(url)\r\n res.status_code == 403\r\n\r\n url = \"/account/fake/resetapikey\"\r\n res = self.app.post(url)\r\n assert res.status_code == 404",
"def restore_encryption_password(session, password, return_type=None, **kwargs):\n verify_not_none(password, \"password\")\n\n body_values = {'encryption_pwd': password}\n\n path = '/api/settings/restore_encryption.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)"
] |
[
"0.6995449",
"0.6721432",
"0.6687601",
"0.6683846",
"0.65477824",
"0.6516542",
"0.6435638",
"0.63833916",
"0.6339373",
"0.6333628",
"0.6329721",
"0.62919116",
"0.62820923",
"0.61816514",
"0.6117711",
"0.6081572",
"0.6053006",
"0.60414654",
"0.60382646",
"0.60087836",
"0.6008431",
"0.59336513",
"0.59251386",
"0.5906148",
"0.5906148",
"0.59039086",
"0.58764625",
"0.58674634",
"0.5806162",
"0.5806162",
"0.57951725",
"0.57824266",
"0.57816637",
"0.57783586",
"0.57774836",
"0.5775901",
"0.57467026",
"0.5737608",
"0.5735618",
"0.571653",
"0.56865793",
"0.5650088",
"0.5647733",
"0.5646782",
"0.56457967",
"0.5640421",
"0.56334937",
"0.5631319",
"0.56161225",
"0.56140697",
"0.5608202",
"0.55860347",
"0.55853844",
"0.55786425",
"0.5550942",
"0.55463994",
"0.5542332",
"0.5535023",
"0.5520207",
"0.5502748",
"0.55018586",
"0.54994506",
"0.54992926",
"0.54982114",
"0.5497104",
"0.5485136",
"0.54799515",
"0.54755557",
"0.5467995",
"0.5466483",
"0.54646856",
"0.5464552",
"0.5464163",
"0.5462917",
"0.54525423",
"0.5451178",
"0.5438148",
"0.54353017",
"0.54286844",
"0.542352",
"0.5421462",
"0.54176265",
"0.5417204",
"0.5416352",
"0.5416148",
"0.54144216",
"0.5412356",
"0.5408199",
"0.5403121",
"0.53998756",
"0.53985876",
"0.539254",
"0.5375197",
"0.5368484",
"0.5363252",
"0.5356125",
"0.5354908",
"0.53346187",
"0.53304166",
"0.53296465"
] |
0.5785506
|
31
|
This operation can reset only the password of the root account of an instance.
|
async def reset_account_password_with_options_async(
self,
request: dds_20151201_models.ResetAccountPasswordRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.ResetAccountPasswordResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.account_name):
query['AccountName'] = request.account_name
if not UtilClient.is_unset(request.account_password):
query['AccountPassword'] = request.account_password
if not UtilClient.is_unset(request.character_type):
query['CharacterType'] = request.character_type
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='ResetAccountPassword',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.ResetAccountPasswordResponse(),
await self.call_api_async(params, req, runtime)
)
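# --- Illustrative usage sketch (not part of the original record) -------------
# A minimal, hypothetical example of calling the async operation defined above.
# Assumptions: the standard `alibabacloud_dds20151201` / `alibabacloud_tea_openapi`
# package layout, plus placeholder credentials, endpoint, instance ID and password.
# Only the request model, its field names, and the method name come from the
# code above.
import asyncio

from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client as DdsClient


async def reset_root_password_example():
    # Placeholder configuration; supply real credentials and the regional endpoint.
    config = open_api_models.Config(
        access_key_id='<access-key-id>',
        access_key_secret='<access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = DdsClient(config)

    # Reset the password of the instance's root account.
    request = dds_20151201_models.ResetAccountPasswordRequest(
        dbinstance_id='<instance-id>',
        account_name='root',
        account_password='<new-strong-password>',
    )
    runtime = util_models.RuntimeOptions()

    response = await client.reset_account_password_with_options_async(request, runtime)
    print(response.body)


if __name__ == '__main__':
    asyncio.run(reset_root_password_example())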
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset_password():\n pass",
"def resetPassword(self, customerguid, password, jobguid=\"\", executionparams=None):",
"def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)",
"def reset_password(cursor: Cursor, owner: Owner) -> Result:\n return pgsql.reset_password(cursor, owner_name(owner))",
"def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)",
"def reset_token(sender, instance, **kwargs):\n new_password = instance.password\n\n try:\n old_password = User.objects.get(pk=instance.pk).password\n except User.DoesNotExist:\n old_password = None\n\n if new_password != old_password:\n Token.objects.filter(user=instance).delete()",
"def change_root_password(self, *args, **kwargs):\r\n return execute(self._change_root_password, *args, **kwargs)",
"def LdapResetPassword(self, record):\n password = self.login_pwd.generate_password()\n attrs = {}\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n logger.debug(\"LDAP LdapResetPassword encrypt_password %s\"\n % (attrs['userPassword']))\n result = self.LdapModifyUser(record, attrs)\n return result",
"def resetUser(self):\n\t\turl = \"https://habitica.com/api/v4/user/reset\"\n\t\treturn(postUrl(url, self.credentials))",
"def reset_ldap_password(username):\n \n from django_ldap_pixiedust.user import SynchronisingUserAdapter\n backend = LDAPBackend()\n user = User.objects.get(username=username)\n ldap_user = backend.get_user(user.id)\n sync = SynchronisingUserAdapter(ldap_user)\n sync.reset_ldap_password()",
"def request_password_reset():",
"def reset_password(connection,password,username):\r\n with connection:\r\n connection.execute(RESET_PASSWORD,(password,username))",
"def reset(email, password):\n try:\n pwd = hash_password(password)\n u = User.query.filter(User.email == email).first()\n u.password = pwd\n try:\n db.session.commit()\n print('User password has been reset successfully.')\n except:\n db.session.rollback()\n except Exception as e:\n print('Error resetting user password: %s' % e)",
"def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1",
"def reset(name, runas=None):\n return prlctl(\"reset\", salt.utils.data.decode(name), runas=runas)",
"def reset_password(newpass, challenge):",
"def reset_account_password(\n self,\n request: dds_20151201_models.ResetAccountPasswordRequest,\n ) -> dds_20151201_models.ResetAccountPasswordResponse:\n runtime = util_models.RuntimeOptions()\n return self.reset_account_password_with_options(request, runtime)",
"def password_reset(self, password, vtoken, welcomeEmailTemplate = ''):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&vtoken=' + vtoken\n payload = {'password': password}\n url = SECURE_API_URL + \"raas/v1/account/password/reset\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)",
"def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True",
"async def reset_password(\n rb_client: LDAPConnection, smtp_client: RBMail, commit: bool, username: str,\n) -> int:\n async with rb_client.connect() as conn:\n password = generate_passwd(12)\n if commit:\n await conn.modify(\n f\"uid={username},ou=accounts,o=redbrick\",\n {\"userPassword\": [(MODIFY_REPLACE, [password])]},\n )\n results = await conn.search(\n \"ou=accounts,o=redbrick\",\n f\"(|(uid={username})(gecos={username}))\",\n attributes=[\"altmail\"],\n )\n async with smtp_client:\n await smtp_client.send_password_reset(\n results[0][\"attributes\"][\"altmail\"][0], username, password\n )\n print(f\"{username} password has been reset\")\n return 0",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def reset_password():\r\n key = request.args.get('key')\r\n if key is None:\r\n abort(403)\r\n userdict = {}\r\n try:\r\n userdict = signer.signer.loads(key, max_age=3600, salt='password-reset')\r\n except BadData:\r\n abort(403)\r\n username = userdict.get('user')\r\n if not username or not userdict.get('password'):\r\n abort(403)\r\n user = model.user.User.query.filter_by(name=username).first_or_404()\r\n if user.passwd_hash != userdict.get('password'):\r\n abort(403)\r\n form = ChangePasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user.set_password(form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n login_user(user)\r\n flash(gettext('You reset your password successfully!'), 'success')\r\n return redirect(url_for('.signin'))\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/password_reset.html', form=form)",
"def resetPassword(self, email):\n\t\turl = \"https://habitica.com/api/v3/user/auth/reset-password\"\n\t\tpayload ={\"email\": email}\n\t\treturn(postUrl(url, self.credentials, payload))",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"",
"def reset_password(self, new_password=None):\n if not new_password:\n new_password = pwgen(10)\n pg = postgresql_svc.PostgreSql()\n if pg.master_user.exists():\n pg.master_user.change_role_password(new_password)\n pg.master_user.change_system_password(new_password)\n else:\n pg.create_linux_user(pg.master_user.name, new_password)\n pg.create_pg_role(pg.master_user.name,\n new_password,\n super=True,\n force=False)\n return new_password",
"def setpassword(self, pwd):\n pass",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))",
"def reset_account_password_with_options(\n self,\n request: dds_20151201_models.ResetAccountPasswordRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ResetAccountPasswordResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.account_name):\n query['AccountName'] = request.account_name\n if not UtilClient.is_unset(request.account_password):\n query['AccountPassword'] = request.account_password\n if not UtilClient.is_unset(request.character_type):\n query['CharacterType'] = request.character_type\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ResetAccountPassword',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ResetAccountPasswordResponse(),\n self.call_api(params, req, runtime)\n )",
"def change_password(change_account):\n change_data(change_account, changed_data='password')",
"def test_patient_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/patient/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))",
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def reboot(self, node):",
"def set_admin_password(self, instance, new_pass):\n pass",
"def reset_password():\n body = request.get_json()\n reset_token = body.get('reset_token')\n password = body.get('password')\n\n if not reset_token or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n user_email = decode_token(reset_token)['identity']\n is_changed = views.UserManagement().change_password(email=user_email, password=password)\n if not is_changed:\n return jsonify(msg.NO_DATA), 404\n\n send_email('[Shodita] Password reset successful', sender='shodita@shodita.com', recipients=[user_email],\n text_body='Password reset was successful', html_body='<p>Password reset was successful</p>')\n\n return jsonify(msg.SUCCESS), 200",
"def test_api_user_reset_password_post(self):\n pass",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def test_reset_user(self):\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n data = {\"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def change_password(self, reset_token, new_password_hash):\n try:\n self.logger.debug('change_password running')\n nosqldb = self.pers.nosql_db\n reset_request = nosqldb['passwordResets'].find_one(\n {'resetToken': reset_token}\n )\n\n if reset_request:\n self.logger.debug('reset request match')\n nosqldb['users'].update_one(\n {\n 'username': reset_request['username']\n },\n {\n '$set': {'passwdHash': new_password_hash}\n }\n\n\n )\n else:\n self.logger.debug('reset request mismatch, nothing changed')\n except Exception as exc:\n self.logger.debug('Unexpected Error %s', str(exc))\n raise",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def enable_root_user(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_post(uri)\n return body[\"user\"][\"password\"]",
"def test_password_reset(client, models):\n user = models[\"user\"][0]\n encoded_token = user.get_reset_token()\n new_password = \"password\"\n response = client.post(\n f\"/password/reset/{encoded_token}\", json={\"password\": new_password}\n )\n assert response.status_code == 200\n assert user.check_password(new_password)",
"def restart_salt():\n stop_salt()\n start_salt()",
"def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))",
"def reboot_instance(InstanceId=None):\n pass",
"def reset_merchant_pass(self, newpass):\n self.refresh()\n if not newpass:\n raise ValueError(\"Password must be defined\")\n\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MerchantPassword': newpass,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()",
"def reset_ilo_credential(self, password):\n acc_uri = '/rest/v1/AccountService/Accounts'\n\n for status, hds, account, memberuri in self._get_collection(acc_uri):\n if account['UserName'] == self.login:\n mod_user = {}\n mod_user['Password'] = password\n status, headers, response = self._rest_patch(memberuri,\n None, mod_user)\n if status != 200:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n return\n\n msg = \"iLO Account with specified username is not found.\"\n raise exception.IloError(msg)",
"def reset_pass(key):\n form = NewPasswordForm()\n form.key.data = key\n\n if form.validate_on_submit():\n form.user.set_password(form.password.data)\n db.session.delete(form.pw_reset)\n db.session.commit()\n\n flash('Your password has been successfully reset', 'alert-success')\n login_user(form.user)\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n form.key.data = key\n # NOTE: This render_template is causing a 404\n return render_template('reset_pass.html', form=form, key=key)",
"def test_reset_passwd_bad_token(self, test_client):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=str(\n create_access_token(identity=UserModel(uuid=uuid.uuid4()))),\n password=\"Azerty!123\"))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['status'] == False",
"def unconfigure_service_password_encryption(device):\n\n try:\n device.configure(\"no service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not unconfigure service password encryption\"\n )",
"def reset_user_password_service(user: User, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user.password = hashed_password\n db.session.commit()",
"def ChangePasswordWithRPToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def test_dietitian_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/dietitian/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reset(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.ResetVM_Task())",
"def reset_server_database_password(self, server_id, database_id):\n response = self._api_request(\n endpoint='application/servers/{}/databases/{}'\n '/reset-password'.format(server_id, database_id),\n mode='POST')\n return response",
"def test_reset_password(self):\n self.register()\n response = self.client.put(self.create_url(\"chegemaggie1@gmail.com\"),\n self.test_update_password_data,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_password_modify_extop(self):\n user_dn = LDAPDN(\"cn=skip,ou=nerdherd,dc=bonsai,dc=test\")\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"p@ssword\"))\n conn = cli.connect()\n self.assertRaises(TypeError,\n lambda: conn.modify_password(new_password=0))\n conn.modify_password(user_dn, \"newpassword\", \"p@ssword\")\n conn.close()\n self.assertRaises(ClosedConnection, conn.modify_password)\n try:\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"newpassword\"))\n cli.set_password_policy(True)\n conn, ctrl = cli.connect()\n newpass = conn.modify_password()\n conn.close()\n self.assertIsInstance(newpass, str)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), newpass))\n conn, ctrl = cli.connect()\n conn.close()\n except bonsai.AuthenticationError:\n self.fail(\"Failed to authenticate with the new password.\")\n finally:\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"userPassword\"])[0]\n entry['userPassword'] = \"p@ssword\"\n entry.modify()\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"pwdChangeTime\",\n \"pwdGraceUseTime\"])[0]\n if (\"pwdChangeTime\", \"pwdGraceUseTime\") in entry.keys():\n del entry['pwdChangeTime']\n del entry['pwdGraceUseTime']\n entry.modify()",
"def test_account_modification_superuser_wrong_pw(flask_server, create_account):\n import requests\n\n config = flask_server\n data = {\n 'superuserpassword': '123',\n 'name': create_account['name'],\n 'new_name': 'foo2',\n 'new_password': 'bar2',\n 'new_code': '456',\n }\n\n req = requests.post('{}/account/modify'.format(API_URL), data=data)\n assert req.content == b'Wrong superuserpassword'\n assert req.status_code == 400",
"def test_reset(u_boot_console):\n\n u_boot_console.run_command('reset', wait_for_prompt=False)\n assert(u_boot_console.validate_exited())",
"def reset_password(token_value, password, confirm_password):\n user = api.token.find_key_by_token(\"password_reset\", token_value)\n if user is None:\n raise PicoException(\"Invalid password reset token\", 422)\n api.user.update_password_request(\n {\"new-password\": password, \"new-password-confirmation\": confirm_password},\n uid=user[\"uid\"],\n )\n\n api.token.delete_token({\"uid\": user[\"uid\"]}, \"password_reset\")",
"def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )",
"def wifi_password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def account_api_password_reset(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_reset.html')\n profile = request.user\n modified = profile.has_usable_api_password()\n api_password = User.objects.make_random_password(settings.API_PASSWORD_LENGTH, settings.API_PASSWORD_CHARACTERS)\n profile.set_api_password(api_password)\n profile.save()\n profile.log_change(request.user, \"Generated new api password.\")\n return render(request, 'agda/account/api_password_reset.html', dict(api_password=api_password, modified=modified))",
"def test_aws_service_api_vm_password_get(self):\n pass",
"def test_replacePasswordWrong(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n d = account.replacePassword(u'blahblah', u'blah')\n perform()\n perform()\n self.failureResultOf(d, errors.BadCredentials)",
"def print_password_change_required_and_logout( context, args ):\n\n print( \"Password change required. To set a new password, run the following:\" )\n print( \"rf_accounts.py -r {} -u {} -p <old password> --setpassword {} <new password>\".format( args.rhost, args.user, args.user ) )\n logout( context, ignore_error = True ) # Some services do not allow session logout in this condition\n return",
"def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()",
"def disable_root_login():\n sudo('passwd --lock root')",
"def reset_password():\n json_data = request.get_json()\n user_email = json_data.get('email') or None\n\n if user_email is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n user_account = db.session.query(UserAccount).filter(\n UserAccount.email == user_email).first()\n if user_account is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n # Generate password hash\n temp_password = str(random.randint(10000,99999))\n update_user = {'password_hashed': get_hashed_password(temp_password)}\n user_account.update(**update_user)\n user_account.save()\n\n email.send('reset_password', user_email, temp_password)\n\n return {'status_code': 200, 'message': 'Password reset success!'}",
"def test_forced_password_change(self):\r\n\r\n student_email, student_password = self._setup_user()\r\n staff_email, staff_password = self._setup_user(is_staff=True)\r\n\r\n self._login(student_email, student_password)\r\n self._login(staff_email, staff_password)\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=1)\r\n with freeze_time(staff_reset_time):\r\n self._login(student_email, student_password)\r\n\r\n # staff should fail because password expired\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n\r\n # if we reset the password, we should be able to log in\r\n self._update_password(staff_email, \"updated\")\r\n self._login(staff_email, \"updated\")\r\n\r\n student_reset_time = timezone.now() + timedelta(days=5)\r\n with freeze_time(student_reset_time):\r\n # Both staff and student logins should fail because user must\r\n # reset the password\r\n\r\n self._login(student_email, student_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(student_email, \"updated\")\r\n self._login(student_email, \"updated\")\r\n\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(staff_email, \"updated2\")\r\n self._login(staff_email, \"updated2\")",
"def test_reset_user_username_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"test_user\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n # NOTE(amelia): Requiring both username and email here may be\n # a slight issue for various UIs as typically a\n # forgotten password screen only asks for the\n # email address, however there isn't a very\n # good way to address this as keystone doesn't\n # store emails in their own field\n # Currently this is an issue for the forked adjutant\n # horizon\n data = {\"email\": \"test@example.com\", \"username\": \"test_user\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Password Reset for OpenStack\")\n self.assertEqual(mail.outbox[0].to[0], \"test@example.com\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def set_password(self, password):\n self.cloudserver.change_password(password)",
"def root_password(self) -> str:\n return pulumi.get(self, \"root_password\")",
"def test_reset_password_fails_for_similar_passwords(self):\n self.test_client.post(\n \"/api/v1/auth/register\", data=self.user_data)\n\n resp = self.test_client.post(\n \"/api/v1/auth/login\",\n data=self.user_data)\n data = json.loads(resp.data)\n\n # reset-password should pass provided the new password\n # is not similar to the old saved password\n token = data['token']\n resp = self.test_client.post(\n \"/api/v1/auth/reset-password\",\n headers=dict(Authorization=f\"Bearer {token}\"),\n data={'password': '!0ctoPus', 'confirm password': '!0ctoPus'}\n )\n\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(data[\"status\"], \"failure\")\n self.assertEqual(\n data[\"message\"],\n \"Your new password should not be similar to your old password\")",
"def change_password(reset_code):\n return dict(reset_code=reset_code)",
"def test_set_user_password(self):\n pass",
"def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))",
"def test_setPassword(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n username = u'%s@%s' % (self.localpart, self.domain)\n d = account.setPassword(u'blahblah')\n perform()\n self.successResultOf(d)\n d = self._requestAvatarId(UsernamePassword(username, u'blahblah'))\n perform()\n self.assertEquals(self.successResultOf(d), account.storeID)\n d = self._requestAvatarId(UsernamePassword(username, self.password))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)\n d = self._requestAvatarId(UsernamePassword(username, account.passwordHash))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)",
"def reset_password(token, new_password):\r\n\r\n\t\tuser = AuthTools.get_user_from_token(token, AuthTools.password_salt)\r\n\r\n\t\tif user is not None:\r\n\t\t\tuser.set_password(new_password)\r\n\t\t\tuser.save()\r\n\t\t\treturn user\r\n\r\n\t\treturn None",
"def resetVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/reset\" % (node,vmid), post_data)\n return data",
"def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()",
"def reset_password(self, login):\n users = self.search([('login', '=', login)])\n if not users:\n users = self.search([('email', '=', login)])\n if len(users) != 1 or (users.state != 'active' or users.approval_status == 'rejected'):\n raise Exception(_('Reset password: invalid username or email'))\n return users.action_reset_password()",
"def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")",
"def password(self, oid):\n path = '/users/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack user: %s' % truncate(res))\n return res[0]['server']",
"def resetSecret(self):\n self.secret = str(uuid())\n self.put()",
"def reset_password(token, new_password):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n user = User.query.get(data.get(\"reset\"))\n if user is None:\n return False\n user.password = new_password\n db.session.add(user)\n return True",
"async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}",
"def test_user_resetpassword(self):\n\n data = {\n \"email\": \"testuser@gmail.com\"\n }\n response = self.client.post(reverse('account:request-reset-email'), data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(email=\"testuser@gmail.com\")\n uidb64 = urlsafe_base64_encode(smart_bytes(user.id))\n token = PasswordResetTokenGenerator().make_token(user)\n relativeLink = reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n )\n resetconfirm_response = self.client.get(reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n ))\n\n self.assertEqual(resetconfirm_response.status_code, status.HTTP_200_OK)\n reset_data = {\n \"uidb64\": resetconfirm_response.data[\"uidb64\"],\n \"token\": resetconfirm_response.data[\"token\"],\n \"password\": \"1234567\"\n }\n resetcomplete_response = self.client.patch(reverse('account:password-reset-complete'), reset_data,\n format=\"json\")\n self.assertEqual(resetcomplete_response.status_code, status.HTTP_200_OK)\n # test if now user can login with new password\n updatedlogin_data = {\n \"email\": \"testuser@gmail.com\",\n \"password\": \"1234567\"\n }\n\n response = self.client.post(self.login_url, updatedlogin_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test if user can login with old password\n response = self.client.post(self.login_url, self.login_data, format=\"json\")\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)",
"def reset_chroot(self):\n try:\n if self.HAS_CHROOT:\n task = reset_ldap_users.post()\n MonQTask.wait_for_tasks(query={\n '_id': task._id, 'state': {'$in': ['ready', 'busy']}\n }, timeout=120000)\n except Exception, e:\n print \"Exception reseting chroot home folders.\"\n raise",
"def reset_password(self, old_password, new_password):\n verb = \"POST\"\n url = urljoiner(self.baseurl, [self.path, \"$me\", \"password\"])\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = {\"oldPassword\": old_password,\n \"newPassword\": new_password}\n\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.post(url, data=data, headers=headers)\n self.handle_error_message(r)\n print(\"password successfully reset!\")\n self.auth_data['password'] = new_password\n try:\n self.login()\n except Exception as e:\n pass",
"def test_57_reset_api_key(self):\r\n url = \"/account/johndoe/update\"\r\n # Anonymous user\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Anonymous user should be redirected for authentication\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n res = self.app.post(url, follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Authenticated user\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n url = \"/account/%s/update\" % user.name\r\n api_key = user.api_key\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should get access to reset api key page\"\r\n assert res.status_code == 200, err_msg\r\n assert \"reset your personal API Key\" in res.data, err_msg\r\n url = \"/account/%s/resetapikey\" % user.name\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should be able to reset his api key\"\r\n assert res.status_code == 200, err_msg\r\n user = db.session.query(User).get(1)\r\n err_msg = \"New generated API key should be different from old one\"\r\n assert api_key != user.api_key, err_msg\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n res = self.app.post(url)\r\n res.status_code == 403\r\n\r\n url = \"/account/fake/resetapikey\"\r\n res = self.app.post(url)\r\n assert res.status_code == 404",
"def reset_password():\n password = request.get_json().get('password')\n access_token = authentication_request()\n\n if access_token:\n # Attempt to decode the token and get the User ID\n user_id = Users.decode_token(access_token)\n if not isinstance(user_id, str):\n user = Users.query.filter_by(id=user_id).first()\n try:\n if not user:\n raise exceptions.NotFound()\n\n valid_password = check_password_validation(password)\n user.password = Users.hash_password(valid_password)\n user.save()\n # db.session.commit()\n return {\"message\": \"you have succesfuly reset your password\"}, status.HTTP_200_OK\n \n except Exception as error:\n \n return {\"message\": str(error)}, status.HTTP_200_OK\n \n else:\n return {\"message\": user_id}, status.HTTP_401_UNAUTHORIZED\n\n return {\"message\": \"Provide a valid authentication token\"}, status.HTTP_403_FORBIDDEN",
"def restore_encryption_password(session, password, return_type=None, **kwargs):\n verify_not_none(password, \"password\")\n\n body_values = {'encryption_pwd': password}\n\n path = '/api/settings/restore_encryption.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def reset_password(self, reset_token_url, password):\n query = parse.parse_qs(parse.urlparse(reset_token_url).query)\n return self._action('resetPasswordSubmitForm', {\n 'token': query['token'][0],\n 'key': query['key'][0],\n 'newpassword': password,\n 'confirmpassword': password,\n }, api='resetpassword')"
] |
[
"0.6997105",
"0.6723147",
"0.6690475",
"0.66852945",
"0.65497524",
"0.6517603",
"0.64377475",
"0.6385964",
"0.6340677",
"0.63359714",
"0.6331445",
"0.6292827",
"0.6283269",
"0.61848646",
"0.6118911",
"0.6083716",
"0.60545266",
"0.6041933",
"0.60399234",
"0.6008904",
"0.60080665",
"0.59345883",
"0.5926198",
"0.5908181",
"0.5908181",
"0.5905525",
"0.5878122",
"0.58711857",
"0.58080655",
"0.58080655",
"0.5797399",
"0.5787324",
"0.5784472",
"0.57836545",
"0.5780661",
"0.5780543",
"0.57759833",
"0.57503647",
"0.57382107",
"0.5737928",
"0.5719094",
"0.5687069",
"0.5652959",
"0.56496173",
"0.5648604",
"0.5646887",
"0.56418407",
"0.56344366",
"0.563306",
"0.5619338",
"0.56156445",
"0.56106406",
"0.55872035",
"0.55870575",
"0.5580057",
"0.55523664",
"0.55488825",
"0.55443925",
"0.55348307",
"0.55206263",
"0.5504914",
"0.5503499",
"0.55010724",
"0.5500116",
"0.54989874",
"0.5498669",
"0.5487697",
"0.54826725",
"0.5476884",
"0.5471437",
"0.54691",
"0.54680777",
"0.5467791",
"0.54648495",
"0.5464255",
"0.54539925",
"0.5452978",
"0.54413295",
"0.54377174",
"0.5431486",
"0.542561",
"0.5425087",
"0.54194903",
"0.5418805",
"0.5418071",
"0.541788",
"0.5415885",
"0.5415531",
"0.541244",
"0.54048276",
"0.54012686",
"0.539942",
"0.53949255",
"0.537653",
"0.5371056",
"0.5366165",
"0.53561676",
"0.5355778",
"0.53359395",
"0.5332606",
"0.5330057"
] |
0.0
|
-1
|
> This operation can reset only the password of the root account of an instance.
|
def reset_account_password(
self,
request: dds_20151201_models.ResetAccountPasswordRequest,
) -> dds_20151201_models.ResetAccountPasswordResponse:
runtime = util_models.RuntimeOptions()
return self.reset_account_password_with_options(request, runtime)
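
A minimal usage sketch for the synchronous wrapper above (the endpoint, credentials, instance ID, and password are placeholder assumptions; the alibabacloud_dds20151201 module path follows the generated SDK layout and may differ in your installation):

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Build a client against the ApsaraDB for MongoDB endpoint (placeholder credentials/endpoint).
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='mongodb.aliyuncs.com',
)
client = Client(config)

# Reset the root account password of a placeholder instance.
request = dds_20151201_models.ResetAccountPasswordRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',
    account_name='root',
    account_password='NewStrongPassw0rd!',
)
response = client.reset_account_password(request)
print(response.body)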
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset_password():\n pass",
"def resetPassword(self, customerguid, password, jobguid=\"\", executionparams=None):",
"def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)",
"def reset_password(cursor: Cursor, owner: Owner) -> Result:\n return pgsql.reset_password(cursor, owner_name(owner))",
"def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)",
"def reset_token(sender, instance, **kwargs):\n new_password = instance.password\n\n try:\n old_password = User.objects.get(pk=instance.pk).password\n except User.DoesNotExist:\n old_password = None\n\n if new_password != old_password:\n Token.objects.filter(user=instance).delete()",
"def change_root_password(self, *args, **kwargs):\r\n return execute(self._change_root_password, *args, **kwargs)",
"def LdapResetPassword(self, record):\n password = self.login_pwd.generate_password()\n attrs = {}\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n logger.debug(\"LDAP LdapResetPassword encrypt_password %s\"\n % (attrs['userPassword']))\n result = self.LdapModifyUser(record, attrs)\n return result",
"def resetUser(self):\n\t\turl = \"https://habitica.com/api/v4/user/reset\"\n\t\treturn(postUrl(url, self.credentials))",
"def reset_ldap_password(username):\n \n from django_ldap_pixiedust.user import SynchronisingUserAdapter\n backend = LDAPBackend()\n user = User.objects.get(username=username)\n ldap_user = backend.get_user(user.id)\n sync = SynchronisingUserAdapter(ldap_user)\n sync.reset_ldap_password()",
"def request_password_reset():",
"def reset_password(connection,password,username):\r\n with connection:\r\n connection.execute(RESET_PASSWORD,(password,username))",
"def reset(email, password):\n try:\n pwd = hash_password(password)\n u = User.query.filter(User.email == email).first()\n u.password = pwd\n try:\n db.session.commit()\n print('User password has been reset successfully.')\n except:\n db.session.rollback()\n except Exception as e:\n print('Error resetting user password: %s' % e)",
"def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1",
"def reset(name, runas=None):\n return prlctl(\"reset\", salt.utils.data.decode(name), runas=runas)",
"def reset_password(newpass, challenge):",
"def password_reset(self, password, vtoken, welcomeEmailTemplate = ''):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&vtoken=' + vtoken\n payload = {'password': password}\n url = SECURE_API_URL + \"raas/v1/account/password/reset\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)",
"def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"async def reset_password(\n rb_client: LDAPConnection, smtp_client: RBMail, commit: bool, username: str,\n) -> int:\n async with rb_client.connect() as conn:\n password = generate_passwd(12)\n if commit:\n await conn.modify(\n f\"uid={username},ou=accounts,o=redbrick\",\n {\"userPassword\": [(MODIFY_REPLACE, [password])]},\n )\n results = await conn.search(\n \"ou=accounts,o=redbrick\",\n f\"(|(uid={username})(gecos={username}))\",\n attributes=[\"altmail\"],\n )\n async with smtp_client:\n await smtp_client.send_password_reset(\n results[0][\"attributes\"][\"altmail\"][0], username, password\n )\n print(f\"{username} password has been reset\")\n return 0",
"def reset_password():\r\n key = request.args.get('key')\r\n if key is None:\r\n abort(403)\r\n userdict = {}\r\n try:\r\n userdict = signer.signer.loads(key, max_age=3600, salt='password-reset')\r\n except BadData:\r\n abort(403)\r\n username = userdict.get('user')\r\n if not username or not userdict.get('password'):\r\n abort(403)\r\n user = model.user.User.query.filter_by(name=username).first_or_404()\r\n if user.passwd_hash != userdict.get('password'):\r\n abort(403)\r\n form = ChangePasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user.set_password(form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n login_user(user)\r\n flash(gettext('You reset your password successfully!'), 'success')\r\n return redirect(url_for('.signin'))\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/password_reset.html', form=form)",
"def resetPassword(self, email):\n\t\turl = \"https://habitica.com/api/v3/user/auth/reset-password\"\n\t\tpayload ={\"email\": email}\n\t\treturn(postUrl(url, self.credentials, payload))",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"",
"def reset_password(self, new_password=None):\n if not new_password:\n new_password = pwgen(10)\n pg = postgresql_svc.PostgreSql()\n if pg.master_user.exists():\n pg.master_user.change_role_password(new_password)\n pg.master_user.change_system_password(new_password)\n else:\n pg.create_linux_user(pg.master_user.name, new_password)\n pg.create_pg_role(pg.master_user.name,\n new_password,\n super=True,\n force=False)\n return new_password",
"def setpassword(self, pwd):\n pass",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))",
"def reset_account_password_with_options(\n self,\n request: dds_20151201_models.ResetAccountPasswordRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ResetAccountPasswordResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.account_name):\n query['AccountName'] = request.account_name\n if not UtilClient.is_unset(request.account_password):\n query['AccountPassword'] = request.account_password\n if not UtilClient.is_unset(request.character_type):\n query['CharacterType'] = request.character_type\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ResetAccountPassword',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ResetAccountPasswordResponse(),\n self.call_api(params, req, runtime)\n )",
"def change_password(change_account):\n change_data(change_account, changed_data='password')",
"def test_patient_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/patient/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))",
"def reboot(self, node):",
"def set_admin_password(self, instance, new_pass):\n pass",
"def reset_password():\n body = request.get_json()\n reset_token = body.get('reset_token')\n password = body.get('password')\n\n if not reset_token or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n user_email = decode_token(reset_token)['identity']\n is_changed = views.UserManagement().change_password(email=user_email, password=password)\n if not is_changed:\n return jsonify(msg.NO_DATA), 404\n\n send_email('[Shodita] Password reset successful', sender='shodita@shodita.com', recipients=[user_email],\n text_body='Password reset was successful', html_body='<p>Password reset was successful</p>')\n\n return jsonify(msg.SUCCESS), 200",
"def test_api_user_reset_password_post(self):\n pass",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def test_reset_user(self):\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n data = {\"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def change_password(self, reset_token, new_password_hash):\n try:\n self.logger.debug('change_password running')\n nosqldb = self.pers.nosql_db\n reset_request = nosqldb['passwordResets'].find_one(\n {'resetToken': reset_token}\n )\n\n if reset_request:\n self.logger.debug('reset request match')\n nosqldb['users'].update_one(\n {\n 'username': reset_request['username']\n },\n {\n '$set': {'passwdHash': new_password_hash}\n }\n\n\n )\n else:\n self.logger.debug('reset request mismatch, nothing changed')\n except Exception as exc:\n self.logger.debug('Unexpected Error %s', str(exc))\n raise",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def enable_root_user(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_post(uri)\n return body[\"user\"][\"password\"]",
"def test_password_reset(client, models):\n user = models[\"user\"][0]\n encoded_token = user.get_reset_token()\n new_password = \"password\"\n response = client.post(\n f\"/password/reset/{encoded_token}\", json={\"password\": new_password}\n )\n assert response.status_code == 200\n assert user.check_password(new_password)",
"def restart_salt():\n stop_salt()\n start_salt()",
"def reboot_instance(InstanceId=None):\n pass",
"def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))",
"def reset_merchant_pass(self, newpass):\n self.refresh()\n if not newpass:\n raise ValueError(\"Password must be defined\")\n\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MerchantPassword': newpass,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()",
"def reset_ilo_credential(self, password):\n acc_uri = '/rest/v1/AccountService/Accounts'\n\n for status, hds, account, memberuri in self._get_collection(acc_uri):\n if account['UserName'] == self.login:\n mod_user = {}\n mod_user['Password'] = password\n status, headers, response = self._rest_patch(memberuri,\n None, mod_user)\n if status != 200:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n return\n\n msg = \"iLO Account with specified username is not found.\"\n raise exception.IloError(msg)",
"def reset_pass(key):\n form = NewPasswordForm()\n form.key.data = key\n\n if form.validate_on_submit():\n form.user.set_password(form.password.data)\n db.session.delete(form.pw_reset)\n db.session.commit()\n\n flash('Your password has been successfully reset', 'alert-success')\n login_user(form.user)\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n form.key.data = key\n # NOTE: This render_template is causing a 404\n return render_template('reset_pass.html', form=form, key=key)",
"def test_reset_passwd_bad_token(self, test_client):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=str(\n create_access_token(identity=UserModel(uuid=uuid.uuid4()))),\n password=\"Azerty!123\"))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['status'] == False",
"def unconfigure_service_password_encryption(device):\n\n try:\n device.configure(\"no service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not unconfigure service password encryption\"\n )",
"def reset_user_password_service(user: User, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user.password = hashed_password\n db.session.commit()",
"def ChangePasswordWithRPToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def test_dietitian_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/dietitian/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reset(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.ResetVM_Task())",
"def reset_server_database_password(self, server_id, database_id):\n response = self._api_request(\n endpoint='application/servers/{}/databases/{}'\n '/reset-password'.format(server_id, database_id),\n mode='POST')\n return response",
"def test_reset_password(self):\n self.register()\n response = self.client.put(self.create_url(\"chegemaggie1@gmail.com\"),\n self.test_update_password_data,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_account_modification_superuser_wrong_pw(flask_server, create_account):\n import requests\n\n config = flask_server\n data = {\n 'superuserpassword': '123',\n 'name': create_account['name'],\n 'new_name': 'foo2',\n 'new_password': 'bar2',\n 'new_code': '456',\n }\n\n req = requests.post('{}/account/modify'.format(API_URL), data=data)\n assert req.content == b'Wrong superuserpassword'\n assert req.status_code == 400",
"def test_password_modify_extop(self):\n user_dn = LDAPDN(\"cn=skip,ou=nerdherd,dc=bonsai,dc=test\")\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"p@ssword\"))\n conn = cli.connect()\n self.assertRaises(TypeError,\n lambda: conn.modify_password(new_password=0))\n conn.modify_password(user_dn, \"newpassword\", \"p@ssword\")\n conn.close()\n self.assertRaises(ClosedConnection, conn.modify_password)\n try:\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"newpassword\"))\n cli.set_password_policy(True)\n conn, ctrl = cli.connect()\n newpass = conn.modify_password()\n conn.close()\n self.assertIsInstance(newpass, str)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), newpass))\n conn, ctrl = cli.connect()\n conn.close()\n except bonsai.AuthenticationError:\n self.fail(\"Failed to authenticate with the new password.\")\n finally:\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"userPassword\"])[0]\n entry['userPassword'] = \"p@ssword\"\n entry.modify()\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"pwdChangeTime\",\n \"pwdGraceUseTime\"])[0]\n if (\"pwdChangeTime\", \"pwdGraceUseTime\") in entry.keys():\n del entry['pwdChangeTime']\n del entry['pwdGraceUseTime']\n entry.modify()",
"def reset_password(token_value, password, confirm_password):\n user = api.token.find_key_by_token(\"password_reset\", token_value)\n if user is None:\n raise PicoException(\"Invalid password reset token\", 422)\n api.user.update_password_request(\n {\"new-password\": password, \"new-password-confirmation\": confirm_password},\n uid=user[\"uid\"],\n )\n\n api.token.delete_token({\"uid\": user[\"uid\"]}, \"password_reset\")",
"def test_reset(u_boot_console):\n\n u_boot_console.run_command('reset', wait_for_prompt=False)\n assert(u_boot_console.validate_exited())",
"def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )",
"def wifi_password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def account_api_password_reset(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_reset.html')\n profile = request.user\n modified = profile.has_usable_api_password()\n api_password = User.objects.make_random_password(settings.API_PASSWORD_LENGTH, settings.API_PASSWORD_CHARACTERS)\n profile.set_api_password(api_password)\n profile.save()\n profile.log_change(request.user, \"Generated new api password.\")\n return render(request, 'agda/account/api_password_reset.html', dict(api_password=api_password, modified=modified))",
"def test_aws_service_api_vm_password_get(self):\n pass",
"def test_replacePasswordWrong(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n d = account.replacePassword(u'blahblah', u'blah')\n perform()\n perform()\n self.failureResultOf(d, errors.BadCredentials)",
"def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()",
"def print_password_change_required_and_logout( context, args ):\n\n print( \"Password change required. To set a new password, run the following:\" )\n print( \"rf_accounts.py -r {} -u {} -p <old password> --setpassword {} <new password>\".format( args.rhost, args.user, args.user ) )\n logout( context, ignore_error = True ) # Some services do not allow session logout in this condition\n return",
"def reset_password():\n json_data = request.get_json()\n user_email = json_data.get('email') or None\n\n if user_email is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n user_account = db.session.query(UserAccount).filter(\n UserAccount.email == user_email).first()\n if user_account is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n # Generate password hash\n temp_password = str(random.randint(10000,99999))\n update_user = {'password_hashed': get_hashed_password(temp_password)}\n user_account.update(**update_user)\n user_account.save()\n\n email.send('reset_password', user_email, temp_password)\n\n return {'status_code': 200, 'message': 'Password reset success!'}",
"def disable_root_login():\n sudo('passwd --lock root')",
"def test_reset_user_username_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"test_user\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n # NOTE(amelia): Requiring both username and email here may be\n # a slight issue for various UIs as typically a\n # forgotten password screen only asks for the\n # email address, however there isn't a very\n # good way to address this as keystone doesn't\n # store emails in their own field\n # Currently this is an issue for the forked adjutant\n # horizon\n data = {\"email\": \"test@example.com\", \"username\": \"test_user\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Password Reset for OpenStack\")\n self.assertEqual(mail.outbox[0].to[0], \"test@example.com\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def test_forced_password_change(self):\r\n\r\n student_email, student_password = self._setup_user()\r\n staff_email, staff_password = self._setup_user(is_staff=True)\r\n\r\n self._login(student_email, student_password)\r\n self._login(staff_email, staff_password)\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=1)\r\n with freeze_time(staff_reset_time):\r\n self._login(student_email, student_password)\r\n\r\n # staff should fail because password expired\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n\r\n # if we reset the password, we should be able to log in\r\n self._update_password(staff_email, \"updated\")\r\n self._login(staff_email, \"updated\")\r\n\r\n student_reset_time = timezone.now() + timedelta(days=5)\r\n with freeze_time(student_reset_time):\r\n # Both staff and student logins should fail because user must\r\n # reset the password\r\n\r\n self._login(student_email, student_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(student_email, \"updated\")\r\n self._login(student_email, \"updated\")\r\n\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(staff_email, \"updated2\")\r\n self._login(staff_email, \"updated2\")",
"def set_password(self, password):\n self.cloudserver.change_password(password)",
"def root_password(self) -> str:\n return pulumi.get(self, \"root_password\")",
"def test_reset_password_fails_for_similar_passwords(self):\n self.test_client.post(\n \"/api/v1/auth/register\", data=self.user_data)\n\n resp = self.test_client.post(\n \"/api/v1/auth/login\",\n data=self.user_data)\n data = json.loads(resp.data)\n\n # reset-password should pass provided the new password\n # is not similar to the old saved password\n token = data['token']\n resp = self.test_client.post(\n \"/api/v1/auth/reset-password\",\n headers=dict(Authorization=f\"Bearer {token}\"),\n data={'password': '!0ctoPus', 'confirm password': '!0ctoPus'}\n )\n\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(data[\"status\"], \"failure\")\n self.assertEqual(\n data[\"message\"],\n \"Your new password should not be similar to your old password\")",
"def change_password(reset_code):\n return dict(reset_code=reset_code)",
"def test_set_user_password(self):\n pass",
"def reset_password(token, new_password):\r\n\r\n\t\tuser = AuthTools.get_user_from_token(token, AuthTools.password_salt)\r\n\r\n\t\tif user is not None:\r\n\t\t\tuser.set_password(new_password)\r\n\t\t\tuser.save()\r\n\t\t\treturn user\r\n\r\n\t\treturn None",
"def resetVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/reset\" % (node,vmid), post_data)\n return data",
"def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))",
"def test_setPassword(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n username = u'%s@%s' % (self.localpart, self.domain)\n d = account.setPassword(u'blahblah')\n perform()\n self.successResultOf(d)\n d = self._requestAvatarId(UsernamePassword(username, u'blahblah'))\n perform()\n self.assertEquals(self.successResultOf(d), account.storeID)\n d = self._requestAvatarId(UsernamePassword(username, self.password))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)\n d = self._requestAvatarId(UsernamePassword(username, account.passwordHash))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)",
"def reset_password(self, login):\n users = self.search([('login', '=', login)])\n if not users:\n users = self.search([('email', '=', login)])\n if len(users) != 1 or (users.state != 'active' or users.approval_status == 'rejected'):\n raise Exception(_('Reset password: invalid username or email'))\n return users.action_reset_password()",
"def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()",
"def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")",
"def password(self, oid):\n path = '/users/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack user: %s' % truncate(res))\n return res[0]['server']",
"def resetSecret(self):\n self.secret = str(uuid())\n self.put()",
"def reset_password(token, new_password):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n user = User.query.get(data.get(\"reset\"))\n if user is None:\n return False\n user.password = new_password\n db.session.add(user)\n return True",
"async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}",
"def test_user_resetpassword(self):\n\n data = {\n \"email\": \"testuser@gmail.com\"\n }\n response = self.client.post(reverse('account:request-reset-email'), data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(email=\"testuser@gmail.com\")\n uidb64 = urlsafe_base64_encode(smart_bytes(user.id))\n token = PasswordResetTokenGenerator().make_token(user)\n relativeLink = reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n )\n resetconfirm_response = self.client.get(reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n ))\n\n self.assertEqual(resetconfirm_response.status_code, status.HTTP_200_OK)\n reset_data = {\n \"uidb64\": resetconfirm_response.data[\"uidb64\"],\n \"token\": resetconfirm_response.data[\"token\"],\n \"password\": \"1234567\"\n }\n resetcomplete_response = self.client.patch(reverse('account:password-reset-complete'), reset_data,\n format=\"json\")\n self.assertEqual(resetcomplete_response.status_code, status.HTTP_200_OK)\n # test if now user can login with new password\n updatedlogin_data = {\n \"email\": \"testuser@gmail.com\",\n \"password\": \"1234567\"\n }\n\n response = self.client.post(self.login_url, updatedlogin_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test if user can login with old password\n response = self.client.post(self.login_url, self.login_data, format=\"json\")\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)",
"def reset_chroot(self):\n try:\n if self.HAS_CHROOT:\n task = reset_ldap_users.post()\n MonQTask.wait_for_tasks(query={\n '_id': task._id, 'state': {'$in': ['ready', 'busy']}\n }, timeout=120000)\n except Exception, e:\n print \"Exception reseting chroot home folders.\"\n raise",
"def reset_password(self, old_password, new_password):\n verb = \"POST\"\n url = urljoiner(self.baseurl, [self.path, \"$me\", \"password\"])\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = {\"oldPassword\": old_password,\n \"newPassword\": new_password}\n\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.post(url, data=data, headers=headers)\n self.handle_error_message(r)\n print(\"password successfully reset!\")\n self.auth_data['password'] = new_password\n try:\n self.login()\n except Exception as e:\n pass",
"def reset_password():\n password = request.get_json().get('password')\n access_token = authentication_request()\n\n if access_token:\n # Attempt to decode the token and get the User ID\n user_id = Users.decode_token(access_token)\n if not isinstance(user_id, str):\n user = Users.query.filter_by(id=user_id).first()\n try:\n if not user:\n raise exceptions.NotFound()\n\n valid_password = check_password_validation(password)\n user.password = Users.hash_password(valid_password)\n user.save()\n # db.session.commit()\n return {\"message\": \"you have succesfuly reset your password\"}, status.HTTP_200_OK\n \n except Exception as error:\n \n return {\"message\": str(error)}, status.HTTP_200_OK\n \n else:\n return {\"message\": user_id}, status.HTTP_401_UNAUTHORIZED\n\n return {\"message\": \"Provide a valid authentication token\"}, status.HTTP_403_FORBIDDEN",
"def test_57_reset_api_key(self):\r\n url = \"/account/johndoe/update\"\r\n # Anonymous user\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Anonymous user should be redirected for authentication\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n res = self.app.post(url, follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Authenticated user\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n url = \"/account/%s/update\" % user.name\r\n api_key = user.api_key\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should get access to reset api key page\"\r\n assert res.status_code == 200, err_msg\r\n assert \"reset your personal API Key\" in res.data, err_msg\r\n url = \"/account/%s/resetapikey\" % user.name\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should be able to reset his api key\"\r\n assert res.status_code == 200, err_msg\r\n user = db.session.query(User).get(1)\r\n err_msg = \"New generated API key should be different from old one\"\r\n assert api_key != user.api_key, err_msg\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n res = self.app.post(url)\r\n res.status_code == 403\r\n\r\n url = \"/account/fake/resetapikey\"\r\n res = self.app.post(url)\r\n assert res.status_code == 404",
"def restore_encryption_password(session, password, return_type=None, **kwargs):\n verify_not_none(password, \"password\")\n\n body_values = {'encryption_pwd': password}\n\n path = '/api/settings/restore_encryption.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)"
] |
[
"0.6995449",
"0.6721432",
"0.6687601",
"0.6683846",
"0.65477824",
"0.6516542",
"0.6435638",
"0.63833916",
"0.6339373",
"0.6333628",
"0.6329721",
"0.62919116",
"0.62820923",
"0.61816514",
"0.6117711",
"0.6081572",
"0.60414654",
"0.60382646",
"0.60087836",
"0.6008431",
"0.59336513",
"0.59251386",
"0.5906148",
"0.5906148",
"0.59039086",
"0.58764625",
"0.58674634",
"0.5806162",
"0.5806162",
"0.57951725",
"0.5785506",
"0.57824266",
"0.57816637",
"0.57783586",
"0.57774836",
"0.5775901",
"0.57467026",
"0.5737608",
"0.5735618",
"0.571653",
"0.56865793",
"0.5650088",
"0.5647733",
"0.5646782",
"0.56457967",
"0.5640421",
"0.56334937",
"0.5631319",
"0.56161225",
"0.56140697",
"0.5608202",
"0.55860347",
"0.55853844",
"0.55786425",
"0.5550942",
"0.55463994",
"0.5542332",
"0.5535023",
"0.5520207",
"0.5502748",
"0.55018586",
"0.54994506",
"0.54992926",
"0.54982114",
"0.5497104",
"0.5485136",
"0.54799515",
"0.54755557",
"0.5467995",
"0.5466483",
"0.54646856",
"0.5464552",
"0.5464163",
"0.5462917",
"0.54525423",
"0.5451178",
"0.5438148",
"0.54353017",
"0.54286844",
"0.542352",
"0.5421462",
"0.54176265",
"0.5417204",
"0.5416352",
"0.5416148",
"0.54144216",
"0.5412356",
"0.5408199",
"0.5403121",
"0.53998756",
"0.53985876",
"0.539254",
"0.5375197",
"0.5368484",
"0.5363252",
"0.5356125",
"0.5354908",
"0.53346187",
"0.53304166",
"0.53296465"
] |
0.6053006
|
16
|
> This operation can reset only the password of the root account of an instance.
|
async def reset_account_password_async(
self,
request: dds_20151201_models.ResetAccountPasswordRequest,
) -> dds_20151201_models.ResetAccountPasswordResponse:
runtime = util_models.RuntimeOptions()
return await self.reset_account_password_with_options_async(request, runtime)
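
A comparable sketch for the asynchronous wrapper (same placeholder assumptions as the synchronous example; it only swaps the call for its *_async counterpart and drives it with asyncio):

import asyncio

from alibabacloud_dds20151201.client import Client
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

async def main() -> None:
    # Placeholder credentials and endpoint; substitute your real configuration here.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)
    request = dds_20151201_models.ResetAccountPasswordRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',
        account_name='root',
        account_password='NewStrongPassw0rd!',
    )
    # The async variant awaits the same RPC without blocking the event loop.
    response = await client.reset_account_password_async(request)
    print(response.body)

asyncio.run(main())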
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reset_password():\n pass",
"def resetPassword(self, customerguid, password, jobguid=\"\", executionparams=None):",
"def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)",
"def reset_password(cursor: Cursor, owner: Owner) -> Result:\n return pgsql.reset_password(cursor, owner_name(owner))",
"def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)",
"def reset_token(sender, instance, **kwargs):\n new_password = instance.password\n\n try:\n old_password = User.objects.get(pk=instance.pk).password\n except User.DoesNotExist:\n old_password = None\n\n if new_password != old_password:\n Token.objects.filter(user=instance).delete()",
"def change_root_password(self, *args, **kwargs):\r\n return execute(self._change_root_password, *args, **kwargs)",
"def LdapResetPassword(self, record):\n password = self.login_pwd.generate_password()\n attrs = {}\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n logger.debug(\"LDAP LdapResetPassword encrypt_password %s\"\n % (attrs['userPassword']))\n result = self.LdapModifyUser(record, attrs)\n return result",
"def resetUser(self):\n\t\turl = \"https://habitica.com/api/v4/user/reset\"\n\t\treturn(postUrl(url, self.credentials))",
"def reset_ldap_password(username):\n \n from django_ldap_pixiedust.user import SynchronisingUserAdapter\n backend = LDAPBackend()\n user = User.objects.get(username=username)\n ldap_user = backend.get_user(user.id)\n sync = SynchronisingUserAdapter(ldap_user)\n sync.reset_ldap_password()",
"def request_password_reset():",
"def reset_password(connection,password,username):\r\n with connection:\r\n connection.execute(RESET_PASSWORD,(password,username))",
"def reset(email, password):\n try:\n pwd = hash_password(password)\n u = User.query.filter(User.email == email).first()\n u.password = pwd\n try:\n db.session.commit()\n print('User password has been reset successfully.')\n except:\n db.session.rollback()\n except Exception as e:\n print('Error resetting user password: %s' % e)",
"def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1",
"def reset(name, runas=None):\n return prlctl(\"reset\", salt.utils.data.decode(name), runas=runas)",
"def reset_password(newpass, challenge):",
"def reset_account_password(\n self,\n request: dds_20151201_models.ResetAccountPasswordRequest,\n ) -> dds_20151201_models.ResetAccountPasswordResponse:\n runtime = util_models.RuntimeOptions()\n return self.reset_account_password_with_options(request, runtime)",
"def password_reset(self, password, vtoken, welcomeEmailTemplate = ''):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&vtoken=' + vtoken\n payload = {'password': password}\n url = SECURE_API_URL + \"raas/v1/account/password/reset\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)",
"def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"async def reset_password(\n rb_client: LDAPConnection, smtp_client: RBMail, commit: bool, username: str,\n) -> int:\n async with rb_client.connect() as conn:\n password = generate_passwd(12)\n if commit:\n await conn.modify(\n f\"uid={username},ou=accounts,o=redbrick\",\n {\"userPassword\": [(MODIFY_REPLACE, [password])]},\n )\n results = await conn.search(\n \"ou=accounts,o=redbrick\",\n f\"(|(uid={username})(gecos={username}))\",\n attributes=[\"altmail\"],\n )\n async with smtp_client:\n await smtp_client.send_password_reset(\n results[0][\"attributes\"][\"altmail\"][0], username, password\n )\n print(f\"{username} password has been reset\")\n return 0",
"def reset_password():\r\n key = request.args.get('key')\r\n if key is None:\r\n abort(403)\r\n userdict = {}\r\n try:\r\n userdict = signer.signer.loads(key, max_age=3600, salt='password-reset')\r\n except BadData:\r\n abort(403)\r\n username = userdict.get('user')\r\n if not username or not userdict.get('password'):\r\n abort(403)\r\n user = model.user.User.query.filter_by(name=username).first_or_404()\r\n if user.passwd_hash != userdict.get('password'):\r\n abort(403)\r\n form = ChangePasswordForm(request.form)\r\n if form.validate_on_submit():\r\n user.set_password(form.new_password.data)\r\n db.session.add(user)\r\n db.session.commit()\r\n login_user(user)\r\n flash(gettext('You reset your password successfully!'), 'success')\r\n return redirect(url_for('.signin'))\r\n if request.method == 'POST' and not form.validate():\r\n flash(gettext('Please correct the errors'), 'error')\r\n return render_template('/account/password_reset.html', form=form)",
"def resetPassword(self, email):\n\t\turl = \"https://habitica.com/api/v3/user/auth/reset-password\"\n\t\tpayload ={\"email\": email}\n\t\treturn(postUrl(url, self.credentials, payload))",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def sudo_restart ( self, ):\r\n pass\r\n \"sudo reboot\"",
"def reset_password(self, new_password=None):\n if not new_password:\n new_password = pwgen(10)\n pg = postgresql_svc.PostgreSql()\n if pg.master_user.exists():\n pg.master_user.change_role_password(new_password)\n pg.master_user.change_system_password(new_password)\n else:\n pg.create_linux_user(pg.master_user.name, new_password)\n pg.create_pg_role(pg.master_user.name,\n new_password,\n super=True,\n force=False)\n return new_password",
"def setpassword(self, pwd):\n pass",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def reset_password(self, uid, password):\n return self.enable_user(uid, password)",
"def test_reset_password(self):\n\n dietitian = Dietitian.query.get(1)\n reset_password(\"newpass\", dietitian)\n\n self.assertEqual(True, dietitian.check_password(\"newpass\"))",
"def reset_account_password_with_options(\n self,\n request: dds_20151201_models.ResetAccountPasswordRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.ResetAccountPasswordResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.account_name):\n query['AccountName'] = request.account_name\n if not UtilClient.is_unset(request.account_password):\n query['AccountPassword'] = request.account_password\n if not UtilClient.is_unset(request.character_type):\n query['CharacterType'] = request.character_type\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ResetAccountPassword',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.ResetAccountPasswordResponse(),\n self.call_api(params, req, runtime)\n )",
"def test_patient_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/patient/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def change_password(change_account):\n change_data(change_account, changed_data='password')",
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def change_password(host, username, password):\r\n # type: (Docker, str, str) -> None\r\n host.cmd(\"echo '%s:%s' | chpasswd\" % (username, password))",
"def reboot(self, node):",
"def set_admin_password(self, instance, new_pass):\n pass",
"def reset_password():\n body = request.get_json()\n reset_token = body.get('reset_token')\n password = body.get('password')\n\n if not reset_token or not password:\n return jsonify(msg.MISSING_PARAMETER), 400\n\n user_email = decode_token(reset_token)['identity']\n is_changed = views.UserManagement().change_password(email=user_email, password=password)\n if not is_changed:\n return jsonify(msg.NO_DATA), 404\n\n send_email('[Shodita] Password reset successful', sender='shodita@shodita.com', recipients=[user_email],\n text_body='Password reset was successful', html_body='<p>Password reset was successful</p>')\n\n return jsonify(msg.SUCCESS), 200",
"def test_api_user_reset_password_post(self):\n pass",
"def new_password(self):\n # create new password\n return password_generator.create_password()\n # have password reset",
"def test_reset_user(self):\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n data = {\"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def change_password(self, reset_token, new_password_hash):\n try:\n self.logger.debug('change_password running')\n nosqldb = self.pers.nosql_db\n reset_request = nosqldb['passwordResets'].find_one(\n {'resetToken': reset_token}\n )\n\n if reset_request:\n self.logger.debug('reset request match')\n nosqldb['users'].update_one(\n {\n 'username': reset_request['username']\n },\n {\n '$set': {'passwdHash': new_password_hash}\n }\n\n\n )\n else:\n self.logger.debug('reset request mismatch, nothing changed')\n except Exception as exc:\n self.logger.debug('Unexpected Error %s', str(exc))\n raise",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def test_password_reset(client, models):\n user = models[\"user\"][0]\n encoded_token = user.get_reset_token()\n new_password = \"password\"\n response = client.post(\n f\"/password/reset/{encoded_token}\", json={\"password\": new_password}\n )\n assert response.status_code == 200\n assert user.check_password(new_password)",
"def enable_root_user(self):\n uri = \"/instances/%s/root\" % self.id\n resp, body = self.manager.api.method_post(uri)\n return body[\"user\"][\"password\"]",
"def restart_salt():\n stop_salt()\n start_salt()",
"def reboot_instance(InstanceId=None):\n pass",
"def test_user_changed_password(self):\n\n form_data = {\n 'token': self.token.key,\n 'password': 'aaaaaa',\n 'password_confirmation': 'aaaaaa'\n }\n form = ResetPasswordForm(form_data)\n form.submit()\n self.user.refresh_from_db()\n self.assertTrue(self.user.check_password(form_data['password']))",
"def reset_merchant_pass(self, newpass):\n self.refresh()\n if not newpass:\n raise ValueError(\"Password must be defined\")\n\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MerchantPassword': newpass,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()",
"def reset_ilo_credential(self, password):\n acc_uri = '/rest/v1/AccountService/Accounts'\n\n for status, hds, account, memberuri in self._get_collection(acc_uri):\n if account['UserName'] == self.login:\n mod_user = {}\n mod_user['Password'] = password\n status, headers, response = self._rest_patch(memberuri,\n None, mod_user)\n if status != 200:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n return\n\n msg = \"iLO Account with specified username is not found.\"\n raise exception.IloError(msg)",
"def reset_pass(key):\n form = NewPasswordForm()\n form.key.data = key\n\n if form.validate_on_submit():\n form.user.set_password(form.password.data)\n db.session.delete(form.pw_reset)\n db.session.commit()\n\n flash('Your password has been successfully reset', 'alert-success')\n login_user(form.user)\n return redirect(url_for('default.home'))\n else:\n flash_form_errors(form)\n form.key.data = key\n # NOTE: This render_template is causing a 404\n return render_template('reset_pass.html', form=form, key=key)",
"def test_reset_passwd_bad_token(self, test_client):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=str(\n create_access_token(identity=UserModel(uuid=uuid.uuid4()))),\n password=\"Azerty!123\"))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['status'] == False",
"def unconfigure_service_password_encryption(device):\n\n try:\n device.configure(\"no service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not unconfigure service password encryption\"\n )",
"def reset_user_password_service(user: User, password: str) -> None:\n hashed_password = bcrypt.generate_password_hash(password).decode('UTF-8')\n user.password = hashed_password\n db.session.commit()",
"def ChangePasswordWithRPToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def test_dietitian_reset_password(self):\n\n data = {\"password\": \"newpass\"}\n result = self.client.post(\"/dietitian/1/account/reset-password\", \n data=data, follow_redirects=True)\n\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"successfully reset\", result.data)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reset(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.ResetVM_Task())",
"def reset_server_database_password(self, server_id, database_id):\n response = self._api_request(\n endpoint='application/servers/{}/databases/{}'\n '/reset-password'.format(server_id, database_id),\n mode='POST')\n return response",
"def test_reset_password(self):\n self.register()\n response = self.client.put(self.create_url(\"chegemaggie1@gmail.com\"),\n self.test_update_password_data,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def reset_password(token_value, password, confirm_password):\n user = api.token.find_key_by_token(\"password_reset\", token_value)\n if user is None:\n raise PicoException(\"Invalid password reset token\", 422)\n api.user.update_password_request(\n {\"new-password\": password, \"new-password-confirmation\": confirm_password},\n uid=user[\"uid\"],\n )\n\n api.token.delete_token({\"uid\": user[\"uid\"]}, \"password_reset\")",
"def test_reset(u_boot_console):\n\n u_boot_console.run_command('reset', wait_for_prompt=False)\n assert(u_boot_console.validate_exited())",
"def test_account_modification_superuser_wrong_pw(flask_server, create_account):\n import requests\n\n config = flask_server\n data = {\n 'superuserpassword': '123',\n 'name': create_account['name'],\n 'new_name': 'foo2',\n 'new_password': 'bar2',\n 'new_code': '456',\n }\n\n req = requests.post('{}/account/modify'.format(API_URL), data=data)\n assert req.content == b'Wrong superuserpassword'\n assert req.status_code == 400",
"def test_password_modify_extop(self):\n user_dn = LDAPDN(\"cn=skip,ou=nerdherd,dc=bonsai,dc=test\")\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"p@ssword\"))\n conn = cli.connect()\n self.assertRaises(TypeError,\n lambda: conn.modify_password(new_password=0))\n conn.modify_password(user_dn, \"newpassword\", \"p@ssword\")\n conn.close()\n self.assertRaises(ClosedConnection, conn.modify_password)\n try:\n cli.set_credentials(\"SIMPLE\", (str(user_dn), \"newpassword\"))\n cli.set_password_policy(True)\n conn, ctrl = cli.connect()\n newpass = conn.modify_password()\n conn.close()\n self.assertIsInstance(newpass, str)\n cli.set_credentials(\"SIMPLE\", (str(user_dn), newpass))\n conn, ctrl = cli.connect()\n conn.close()\n except bonsai.AuthenticationError:\n self.fail(\"Failed to authenticate with the new password.\")\n finally:\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"userPassword\"])[0]\n entry['userPassword'] = \"p@ssword\"\n entry.modify()\n entry = self.conn.search(user_dn, 0,\n attrlist=[\"pwdChangeTime\",\n \"pwdGraceUseTime\"])[0]\n if (\"pwdChangeTime\", \"pwdGraceUseTime\") in entry.keys():\n del entry['pwdChangeTime']\n del entry['pwdGraceUseTime']\n entry.modify()",
"def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )",
"def wifi_password(self):\n raise RuntimeError(\"Password can not be read, only set\")",
"def account_api_password_reset(request):\n if request.method != 'POST':\n return render(request, 'agda/account/api_password_reset.html')\n profile = request.user\n modified = profile.has_usable_api_password()\n api_password = User.objects.make_random_password(settings.API_PASSWORD_LENGTH, settings.API_PASSWORD_CHARACTERS)\n profile.set_api_password(api_password)\n profile.save()\n profile.log_change(request.user, \"Generated new api password.\")\n return render(request, 'agda/account/api_password_reset.html', dict(api_password=api_password, modified=modified))",
"def test_aws_service_api_vm_password_get(self):\n pass",
"def reset_password():\n json_data = request.get_json()\n user_email = json_data.get('email') or None\n\n if user_email is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n user_account = db.session.query(UserAccount).filter(\n UserAccount.email == user_email).first()\n if user_account is None:\n raise BadRequest(description=INCORRECT_RESET_PARAMS_MSG)\n\n # Generate password hash\n temp_password = str(random.randint(10000,99999))\n update_user = {'password_hashed': get_hashed_password(temp_password)}\n user_account.update(**update_user)\n user_account.save()\n\n email.send('reset_password', user_email, temp_password)\n\n return {'status_code': 200, 'message': 'Password reset success!'}",
"def test_replacePasswordWrong(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n d = account.replacePassword(u'blahblah', u'blah')\n perform()\n perform()\n self.failureResultOf(d, errors.BadCredentials)",
"def print_password_change_required_and_logout( context, args ):\n\n print( \"Password change required. To set a new password, run the following:\" )\n print( \"rf_accounts.py -r {} -u {} -p <old password> --setpassword {} <new password>\".format( args.rhost, args.user, args.user ) )\n logout( context, ignore_error = True ) # Some services do not allow session logout in this condition\n return",
"def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()",
"def disable_root_login():\n sudo('passwd --lock root')",
"def test_reset_user_username_not_email(self):\n\n user = fake_clients.FakeUser(\n name=\"test_user\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/ResetPassword\"\n # NOTE(amelia): Requiring both username and email here may be\n # a slight issue for various UIs as typically a\n # forgotten password screen only asks for the\n # email address, however there isn't a very\n # good way to address this as keystone doesn't\n # store emails in their own field\n # Currently this is an issue for the forked adjutant\n # horizon\n data = {\"email\": \"test@example.com\", \"username\": \"test_user\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].subject, \"Password Reset for OpenStack\")\n self.assertEqual(mail.outbox[0].to[0], \"test@example.com\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"new_test_password\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password\")",
"def test_forced_password_change(self):\r\n\r\n student_email, student_password = self._setup_user()\r\n staff_email, staff_password = self._setup_user(is_staff=True)\r\n\r\n self._login(student_email, student_password)\r\n self._login(staff_email, staff_password)\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=1)\r\n with freeze_time(staff_reset_time):\r\n self._login(student_email, student_password)\r\n\r\n # staff should fail because password expired\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n\r\n # if we reset the password, we should be able to log in\r\n self._update_password(staff_email, \"updated\")\r\n self._login(staff_email, \"updated\")\r\n\r\n student_reset_time = timezone.now() + timedelta(days=5)\r\n with freeze_time(student_reset_time):\r\n # Both staff and student logins should fail because user must\r\n # reset the password\r\n\r\n self._login(student_email, student_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(student_email, \"updated\")\r\n self._login(student_email, \"updated\")\r\n\r\n self._login(staff_email, staff_password, should_succeed=False,\r\n err_msg_check=\"Your password has expired due to password policy on this account\")\r\n self._update_password(staff_email, \"updated2\")\r\n self._login(staff_email, \"updated2\")",
"def set_password(self, password):\n self.cloudserver.change_password(password)",
"def root_password(self) -> str:\n return pulumi.get(self, \"root_password\")",
"def test_reset_password_fails_for_similar_passwords(self):\n self.test_client.post(\n \"/api/v1/auth/register\", data=self.user_data)\n\n resp = self.test_client.post(\n \"/api/v1/auth/login\",\n data=self.user_data)\n data = json.loads(resp.data)\n\n # reset-password should pass provided the new password\n # is not similar to the old saved password\n token = data['token']\n resp = self.test_client.post(\n \"/api/v1/auth/reset-password\",\n headers=dict(Authorization=f\"Bearer {token}\"),\n data={'password': '!0ctoPus', 'confirm password': '!0ctoPus'}\n )\n\n self.assertEqual(resp.status_code, 400)\n data = json.loads(resp.data)\n self.assertEqual(data[\"status\"], \"failure\")\n self.assertEqual(\n data[\"message\"],\n \"Your new password should not be similar to your old password\")",
"def change_password(reset_code):\n return dict(reset_code=reset_code)",
"def test_set_user_password(self):\n pass",
"def resetVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/reset\" % (node,vmid), post_data)\n return data",
"def reset_password(token, new_password):\r\n\r\n\t\tuser = AuthTools.get_user_from_token(token, AuthTools.password_salt)\r\n\r\n\t\tif user is not None:\r\n\t\t\tuser.set_password(new_password)\r\n\t\t\tuser.save()\r\n\t\t\treturn user\r\n\r\n\t\treturn None",
"def test_mod_password(self, mapp, existing_user_id):\n mapp.logoff()\n mapp.login(user=existing_user_id, password=\"1234\")\n mapp.modify_user(user = existing_user_id, password = id(self))\n # Verify that the password was indeed changed.\n mapp.logoff()\n mapp.login(user=existing_user_id,\n password=\"1234\", code = 401)\n mapp.login(user=existing_user_id, password=id(self))",
"def reset_password(self, login):\n users = self.search([('login', '=', login)])\n if not users:\n users = self.search([('email', '=', login)])\n if len(users) != 1 or (users.state != 'active' or users.approval_status == 'rejected'):\n raise Exception(_('Reset password: invalid username or email'))\n return users.action_reset_password()",
"def test_setPassword(self):\n self.realm._txCryptContext, perform = getTestContext()\n account = self.realm.addAccount(\n self.localpart, self.domain, self.password)\n username = u'%s@%s' % (self.localpart, self.domain)\n d = account.setPassword(u'blahblah')\n perform()\n self.successResultOf(d)\n d = self._requestAvatarId(UsernamePassword(username, u'blahblah'))\n perform()\n self.assertEquals(self.successResultOf(d), account.storeID)\n d = self._requestAvatarId(UsernamePassword(username, self.password))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)\n d = self._requestAvatarId(UsernamePassword(username, account.passwordHash))\n perform()\n self.failureResultOf(d, UnauthorizedLogin)",
"def change_password(self):\n self.test_user.set_password(self.create_user_data()['password1'])\n self.test_user.save()",
"def change_password(self, new_password):\n dev = self.nearest_pandevice()\n self.password_hash = dev.request_password_hash(new_password)\n self.update(\"password_hash\")",
"def password(self, oid):\n path = '/users/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack user: %s' % truncate(res))\n return res[0]['server']",
"def resetSecret(self):\n self.secret = str(uuid())\n self.put()",
"def reset_password(token, new_password):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n user = User.query.get(data.get(\"reset\"))\n if user is None:\n return False\n user.password = new_password\n db.session.add(user)\n return True",
"async def user_change_password(\n form: ChangePasswordRequest,\n db: Session = Depends(db_session)):\n token: AccessToken = find_ot_access_token(db, form.token)\n if not token:\n return {\"success\": False, \"msg\": \"Token was not found\"}\n\n token.user.hashed_password = PWD_CONTEXT.hash(form.password)\n db.delete(token)\n db.commit()\n return {\"success\": True}",
"def test_user_resetpassword(self):\n\n data = {\n \"email\": \"testuser@gmail.com\"\n }\n response = self.client.post(reverse('account:request-reset-email'), data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n user = User.objects.get(email=\"testuser@gmail.com\")\n uidb64 = urlsafe_base64_encode(smart_bytes(user.id))\n token = PasswordResetTokenGenerator().make_token(user)\n relativeLink = reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n )\n resetconfirm_response = self.client.get(reverse(\n 'accounts:password-reset-confirm',\n kwargs={'uidb64': uidb64, 'token': token}\n ))\n\n self.assertEqual(resetconfirm_response.status_code, status.HTTP_200_OK)\n reset_data = {\n \"uidb64\": resetconfirm_response.data[\"uidb64\"],\n \"token\": resetconfirm_response.data[\"token\"],\n \"password\": \"1234567\"\n }\n resetcomplete_response = self.client.patch(reverse('account:password-reset-complete'), reset_data,\n format=\"json\")\n self.assertEqual(resetcomplete_response.status_code, status.HTTP_200_OK)\n # test if now user can login with new password\n updatedlogin_data = {\n \"email\": \"testuser@gmail.com\",\n \"password\": \"1234567\"\n }\n\n response = self.client.post(self.login_url, updatedlogin_data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # test if user can login with old password\n response = self.client.post(self.login_url, self.login_data, format=\"json\")\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)",
"def reset_chroot(self):\n try:\n if self.HAS_CHROOT:\n task = reset_ldap_users.post()\n MonQTask.wait_for_tasks(query={\n '_id': task._id, 'state': {'$in': ['ready', 'busy']}\n }, timeout=120000)\n except Exception, e:\n print \"Exception reseting chroot home folders.\"\n raise",
"def reset_password(self, old_password, new_password):\n verb = \"POST\"\n url = urljoiner(self.baseurl, [self.path, \"$me\", \"password\"])\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n data = {\"oldPassword\": old_password,\n \"newPassword\": new_password}\n\n if(self.debug):\n print(verb + \" \" + url)\n r = requests.post(url, data=data, headers=headers)\n self.handle_error_message(r)\n print(\"password successfully reset!\")\n self.auth_data['password'] = new_password\n try:\n self.login()\n except Exception as e:\n pass",
"def reset_password():\n password = request.get_json().get('password')\n access_token = authentication_request()\n\n if access_token:\n # Attempt to decode the token and get the User ID\n user_id = Users.decode_token(access_token)\n if not isinstance(user_id, str):\n user = Users.query.filter_by(id=user_id).first()\n try:\n if not user:\n raise exceptions.NotFound()\n\n valid_password = check_password_validation(password)\n user.password = Users.hash_password(valid_password)\n user.save()\n # db.session.commit()\n return {\"message\": \"you have succesfuly reset your password\"}, status.HTTP_200_OK\n \n except Exception as error:\n \n return {\"message\": str(error)}, status.HTTP_200_OK\n \n else:\n return {\"message\": user_id}, status.HTTP_401_UNAUTHORIZED\n\n return {\"message\": \"Provide a valid authentication token\"}, status.HTTP_403_FORBIDDEN",
"def test_57_reset_api_key(self):\r\n url = \"/account/johndoe/update\"\r\n # Anonymous user\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Anonymous user should be redirected for authentication\"\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n res = self.app.post(url, follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, err_msg\r\n\r\n # Authenticated user\r\n self.register()\r\n user = db.session.query(User).get(1)\r\n url = \"/account/%s/update\" % user.name\r\n api_key = user.api_key\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should get access to reset api key page\"\r\n assert res.status_code == 200, err_msg\r\n assert \"reset your personal API Key\" in res.data, err_msg\r\n url = \"/account/%s/resetapikey\" % user.name\r\n res = self.app.post(url, follow_redirects=True)\r\n err_msg = \"Authenticated user should be able to reset his api key\"\r\n assert res.status_code == 200, err_msg\r\n user = db.session.query(User).get(1)\r\n err_msg = \"New generated API key should be different from old one\"\r\n assert api_key != user.api_key, err_msg\r\n\r\n self.register(fullname=\"new\", name=\"new\")\r\n res = self.app.post(url)\r\n res.status_code == 403\r\n\r\n url = \"/account/fake/resetapikey\"\r\n res = self.app.post(url)\r\n assert res.status_code == 404",
"def restore_encryption_password(session, password, return_type=None, **kwargs):\n verify_not_none(password, \"password\")\n\n body_values = {'encryption_pwd': password}\n\n path = '/api/settings/restore_encryption.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)"
] |
[
"0.69956654",
"0.67230797",
"0.6687879",
"0.6683989",
"0.6548391",
"0.65178514",
"0.64349926",
"0.63839984",
"0.6341612",
"0.63333577",
"0.63292885",
"0.6291432",
"0.62829876",
"0.61829466",
"0.6119637",
"0.60824883",
"0.60539484",
"0.6043146",
"0.60395235",
"0.6010371",
"0.60076326",
"0.59354216",
"0.5926945",
"0.5908053",
"0.5908053",
"0.5905139",
"0.58766025",
"0.58663386",
"0.58063203",
"0.58063203",
"0.5794852",
"0.57856363",
"0.5782536",
"0.57809275",
"0.5779705",
"0.57757837",
"0.57753783",
"0.5745599",
"0.57392025",
"0.5736313",
"0.571532",
"0.56875706",
"0.5651021",
"0.56496733",
"0.5646251",
"0.5645545",
"0.56409883",
"0.5634674",
"0.5632102",
"0.5617772",
"0.56133497",
"0.5610055",
"0.55877244",
"0.55858344",
"0.557865",
"0.5550226",
"0.5544413",
"0.5542736",
"0.5536433",
"0.5523025",
"0.5502704",
"0.5501944",
"0.55003035",
"0.54994327",
"0.549885",
"0.54970044",
"0.5485407",
"0.5478054",
"0.5475548",
"0.5466265",
"0.5466167",
"0.54660267",
"0.546486",
"0.54633504",
"0.5462322",
"0.5453584",
"0.5451868",
"0.5436614",
"0.5434345",
"0.54299897",
"0.54238874",
"0.542001",
"0.5419318",
"0.54180163",
"0.54161704",
"0.5415285",
"0.5414283",
"0.5411719",
"0.54072267",
"0.5401832",
"0.5400139",
"0.539868",
"0.53914976",
"0.537595",
"0.53698003",
"0.53646517",
"0.5356222",
"0.53559124",
"0.533519",
"0.5332039",
"0.5331101"
] |
0.0
|
-1
|
This operation can also be used to restart a shard or mongos node in a sharded cluster instance.
|
def restart_dbinstance_with_options(
self,
request: dds_20151201_models.RestartDBInstanceRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.RestartDBInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.node_id):
query['NodeId'] = request.node_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='RestartDBInstance',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.RestartDBInstanceResponse(),
self.call_api(params, req, runtime)
)
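
A minimal usage sketch for the synchronous method above, assuming `client` is an already-initialized instance of this SDK client class; the instance and node IDs are placeholders, and the import paths follow the usual alibabacloud package layout (an assumption here, since the imports are defined earlier in the generated file and not shown in this section):

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_util import models as util_models

# DBInstanceId identifies the sharded cluster instance; NodeId identifies the
# shard or mongos node to restart. Both values below are placeholders.
request = dds_20151201_models.RestartDBInstanceRequest(
    dbinstance_id='dds-bp1**********',
    node_id='d-bp1**********'
)
runtime = util_models.RuntimeOptions()
response = client.restart_dbinstance_with_options(request, runtime)
print(response.body)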
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cluster_restart(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.restart(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster restart failed\")",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def restart_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Restarting cluster\"\n return ret\n\n __salt__[\"trafficserver.restart_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Restarted cluster\"\n return ret",
"def restart_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/restart').format(self.api_version,\n cluster_name,\n service_name)).json()",
"def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def cluster_reboot(cluster):\n map(reboot, cluster)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def restart(name, runas=None):\n return prlctl(\"restart\", salt.utils.data.decode(name), runas=runas)",
"def restart_kernel(self, kernel_id, now=False):",
"async def restart_node(request: web.Request) -> web.Response:\n\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n await director_v2_api.restart_dynamic_service(request.app, f\"{path_params.node_id}\")\n\n raise web.HTTPNoContent()",
"def _restart_workload(self, workload):\n self.log.info('%-20s RESTARTING', workload.name())\n workload.stop()\n workload.post_stop()\n workload.pre_start()\n workload.start()",
"def restart(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"restart\"\n run_command_with_services(context, user, remote, instance, stack, command, services)",
"def reboot(self, node):",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None):\n if ex_cloud_service_name is None:\n if node.extra is not None:\n ex_cloud_service_name = node.extra.get(\"ex_cloud_service_name\")\n\n if not ex_cloud_service_name:\n raise ValueError(\"ex_cloud_service_name is required.\")\n\n if not ex_deployment_slot:\n ex_deployment_slot = \"Production\"\n\n _deployment_name = self._get_deployment(\n service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot\n ).name\n\n try:\n response = self._perform_post(\n self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name)\n + \"/roleinstances/\"\n + _str(node.id)\n + \"?comp=reboot\",\n \"\",\n )\n\n self.raise_for_response(response, 202)\n\n if self._parse_response_for_async_op(response):\n return True\n else:\n return False\n except Exception:\n return False",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def restart(config):\n shutdown(config)\n startup(config)\n return",
"def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True",
"def supervisor_restart():\n log('restart supervisor', yellow)\n sudo('/etc/init.d/supervisor stop')\n sudo('/etc/init.d/supervisor start')\n # sudo('/etc/init.d/supervisor restart')",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()",
"def restart_salt():\n stop_salt()\n start_salt()",
"def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])",
"def cmd_pamaprestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('map_restart', maxRetries=5)",
"def restart_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"restart\", service_name])",
"def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def restart(*args, **kwargs):\n return restart_type(args, kwargs)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n hass.services.call(DOMAIN, SERVICE_RESTART, data)",
"def restart_kernel(self, now=False, **kw):",
"def restart():\n stop()\n start()",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"def restart(service):\n # TODO: replace this with your relevant restart logic\n assert service.isalpha()\n run(\"service\", service, \"restart\")",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])",
"def restart_game(context: GameContext) -> None:\n left_spaceship, right_spaceship = create_spaceships()\n context.restart(left_spaceship, right_spaceship)",
"def reboot(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.reboot_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(vm_hostname, force=False, no_redefine=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_shutdown()\n vm.aws_start()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n if not vm.is_running():\n raise InvalidStateError('\"{}\" is not running'.format(vm.fqdn))\n\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n\n if not no_redefine:\n vm.hypervisor.redefine_vm(vm)\n\n vm.start()\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n log.info('\"{}\" is restarted.'.format(vm.fqdn))",
"def test_restart_node_with_encrypted_pkeys(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n rest = RestConnection(self.master)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]\n for node in self.servers[1:self.nodes_init]:\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n self.sleep(10, \"Wait after restart\")\n self.cluster.async_failover(nodes_in_cluster,\n [node],\n graceful=False)\n self.wait_for_failover_or_assert(1)\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], [])\n CbServer.use_https = https_val\n self.wait_for_rebalance_to_complete(task)\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster,\n [], [node])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n nodes_in_cluster.remove(node)",
"def reboot(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.reboot_server(server)\n\n return r",
"def Restart(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Restart\", cmd)",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def test_snat_with_vrouter_agent_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_vrouter_agent()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def reboot_instance(InstanceId=None):\n pass",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:",
"def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)",
"def test_snat_with_kube_manager_restart(self):\n self.addCleanup(self.invalidate_kube_manager_inspect)\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_kube_manager()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))",
"async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')",
"def restart_container(self, container_node):\n try:\n print(\"Restarting container: \", container_node.get_container_id())\n container = self.docker_client.containers.get(container_node.get_container_id())\n container.restart()\n return True\n except docker.errors.APIError as de:\n print(\"Error restarting the container\")\n traceback.print_exc()\n print de\n return False",
"def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()",
"def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()",
"def restart_service_cmd(klass, service):\n raise NotImplementedError",
"def cmd_pafastrestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Fast restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('fast_restart', maxRetries=5)",
"def RestartDiscovery(self):\n payload = { \"Arg1\": self.href }\n return self._execute('restartDiscovery', payload=payload, response_object=None)",
"def restart():\n run_commands('python manage.py supervisor restart all')",
"def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret",
"def restart(self, _id):\n\n try:\n UpstartJob(_id).restart()\n except DBusException as e:\n raise ServiceOperationError(e)",
"def cmd_restart(self, app_name=None):\n rc = self.socket_command_with_project('restart', app_name)\n return rc",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartService(self):\n return self.session.request('diag/service/')",
"def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def restartInstance(self, pid, instanceName, enabled):\n if not (self.enabled and enabled):\n self.log.info(\"Restarting is disabled, please restart %s manually\" % instanceName)\n self.accounting[instanceName][\"Treatment\"] = \"Please restart it manually\"\n return S_OK(NO_RESTART)\n\n try:\n agentProc = psutil.Process(int(pid))\n processesToTerminate = agentProc.children(recursive=True)\n processesToTerminate.append(agentProc)\n\n for proc in processesToTerminate:\n proc.terminate()\n\n _gone, alive = psutil.wait_procs(processesToTerminate, timeout=5,\n callback=partial(self.on_terminate, instanceName))\n for proc in alive:\n self.log.info(\"Forcefully killing process %s\" % proc.pid)\n proc.kill()\n\n return S_OK()\n\n except psutil.Error as err:\n self.logError(\"Exception occurred in terminating processes\", \"%s\" % err)\n return S_ERROR()",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"def restart(self) -> None:",
"def _restart(self):\n pass",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def Restart(self, request, global_params=None):\n config = self.GetMethodConfig('Restart')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def restart(self):\n\n self.stop()\n self.start()",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()",
"def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def restart(self):\n\t\treturn self.reset().start()",
"def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()",
"def restart(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Restarting `{service_name}`\")\n \n run_command(f\"sudo systemctl restart {service_name}\")",
"def service_restart(appname):\n sudo('service {} restart'.format(appname))",
"def RestartWorkers(config):\n if not config.HasCommandChannels():\n raise ConfigException(\"No URL found for sending command messages. Update \"\n \"your cluster configuration.\")\n Worker.SendKillCommand(zmq.Context(), config.command_sender,\n Worker.CMD_KILL_ERROR)\n time.sleep(1) # wait for ZMQ to flush message queues",
"def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartComponent(self, opts):\n self.stopComponent(opts)\n return self.startComponent(opts)",
"def restart(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.start()",
"def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')",
"def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()"
] |
[
"0.7612083",
"0.7318449",
"0.72762173",
"0.69061714",
"0.6568057",
"0.6536657",
"0.6463088",
"0.62089056",
"0.61884105",
"0.61884105",
"0.6181434",
"0.61671895",
"0.6118982",
"0.61028636",
"0.6042757",
"0.6039685",
"0.6015281",
"0.59943885",
"0.59732586",
"0.597307",
"0.5972814",
"0.5941249",
"0.5930792",
"0.5922716",
"0.59185475",
"0.5912167",
"0.58690035",
"0.5855097",
"0.58383155",
"0.5836008",
"0.58332807",
"0.5830926",
"0.5826295",
"0.58172864",
"0.58172655",
"0.5801121",
"0.57999897",
"0.5776906",
"0.57711476",
"0.5722214",
"0.57160395",
"0.5711083",
"0.57014734",
"0.5697634",
"0.56919634",
"0.5684979",
"0.56834435",
"0.56774783",
"0.567609",
"0.5670647",
"0.5669467",
"0.5666177",
"0.56652",
"0.56639975",
"0.5661974",
"0.5649557",
"0.56477904",
"0.5646907",
"0.56427264",
"0.5631139",
"0.56246156",
"0.56240886",
"0.562102",
"0.56075186",
"0.55853283",
"0.5584671",
"0.5577557",
"0.5576906",
"0.5544911",
"0.55290437",
"0.55252284",
"0.5507128",
"0.54833156",
"0.5474029",
"0.5473179",
"0.5471455",
"0.546058",
"0.5453078",
"0.5452546",
"0.54421437",
"0.5438671",
"0.5438331",
"0.54341227",
"0.5432741",
"0.5431444",
"0.54238945",
"0.539527",
"0.5389607",
"0.53845924",
"0.53734946",
"0.537272",
"0.53575194",
"0.53390723",
"0.5334618",
"0.5324424",
"0.5316839",
"0.5316839",
"0.5298243",
"0.5291261",
"0.5281668",
"0.5281668"
] |
0.0
|
-1
|
This operation can also be used to restart a shard or mongos node in a sharded cluster instance.
|
async def restart_dbinstance_with_options_async(
self,
request: dds_20151201_models.RestartDBInstanceRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.RestartDBInstanceResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.node_id):
query['NodeId'] = request.node_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='RestartDBInstance',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.RestartDBInstanceResponse(),
await self.call_api_async(params, req, runtime)
)
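
The asynchronous variant differs only in awaiting the `_async` method; below is a sketch under the same assumptions as before (initialized `client`, placeholder IDs, and the same module aliases used by the surrounding code):

import asyncio

async def restart_shard_node(client):
    # Placeholder IDs; replace with a real sharded cluster instance ID and
    # the ID of the shard or mongos node to restart.
    request = dds_20151201_models.RestartDBInstanceRequest(
        dbinstance_id='dds-bp1**********',
        node_id='d-bp1**********'
    )
    runtime = util_models.RuntimeOptions()
    return await client.restart_dbinstance_with_options_async(request, runtime)

# Example invocation:
# asyncio.run(restart_shard_node(client))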
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cluster_restart(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.restart(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster restart failed\")",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def restart_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Restarting cluster\"\n return ret\n\n __salt__[\"trafficserver.restart_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Restarted cluster\"\n return ret",
"def restart_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/restart').format(self.api_version,\n cluster_name,\n service_name)).json()",
"def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def cluster_reboot(cluster):\n map(reboot, cluster)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def restart(name, runas=None):\n return prlctl(\"restart\", salt.utils.data.decode(name), runas=runas)",
"def restart_kernel(self, kernel_id, now=False):",
"async def restart_node(request: web.Request) -> web.Response:\n\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n await director_v2_api.restart_dynamic_service(request.app, f\"{path_params.node_id}\")\n\n raise web.HTTPNoContent()",
"def reboot(self, node):",
"def _restart_workload(self, workload):\n self.log.info('%-20s RESTARTING', workload.name())\n workload.stop()\n workload.post_stop()\n workload.pre_start()\n workload.start()",
"def restart(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"restart\"\n run_command_with_services(context, user, remote, instance, stack, command, services)",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None):\n if ex_cloud_service_name is None:\n if node.extra is not None:\n ex_cloud_service_name = node.extra.get(\"ex_cloud_service_name\")\n\n if not ex_cloud_service_name:\n raise ValueError(\"ex_cloud_service_name is required.\")\n\n if not ex_deployment_slot:\n ex_deployment_slot = \"Production\"\n\n _deployment_name = self._get_deployment(\n service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot\n ).name\n\n try:\n response = self._perform_post(\n self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name)\n + \"/roleinstances/\"\n + _str(node.id)\n + \"?comp=reboot\",\n \"\",\n )\n\n self.raise_for_response(response, 202)\n\n if self._parse_response_for_async_op(response):\n return True\n else:\n return False\n except Exception:\n return False",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def restart(config):\n shutdown(config)\n startup(config)\n return",
"def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True",
"def supervisor_restart():\n log('restart supervisor', yellow)\n sudo('/etc/init.d/supervisor stop')\n sudo('/etc/init.d/supervisor start')\n # sudo('/etc/init.d/supervisor restart')",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()",
"def restart_salt():\n stop_salt()\n start_salt()",
"def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])",
"def cmd_pamaprestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('map_restart', maxRetries=5)",
"def restart_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"restart\", service_name])",
"def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def restart(*args, **kwargs):\n return restart_type(args, kwargs)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n hass.services.call(DOMAIN, SERVICE_RESTART, data)",
"def restart_kernel(self, now=False, **kw):",
"def restart():\n stop()\n start()",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"def restart(service):\n # TODO: replace this with your relevant restart logic\n assert service.isalpha()\n run(\"service\", service, \"restart\")",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])",
"def restart_game(context: GameContext) -> None:\n left_spaceship, right_spaceship = create_spaceships()\n context.restart(left_spaceship, right_spaceship)",
"def reboot(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.reboot_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(vm_hostname, force=False, no_redefine=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_shutdown()\n vm.aws_start()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n if not vm.is_running():\n raise InvalidStateError('\"{}\" is not running'.format(vm.fqdn))\n\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n\n if not no_redefine:\n vm.hypervisor.redefine_vm(vm)\n\n vm.start()\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n log.info('\"{}\" is restarted.'.format(vm.fqdn))",
"def test_restart_node_with_encrypted_pkeys(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n rest = RestConnection(self.master)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]\n for node in self.servers[1:self.nodes_init]:\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n self.sleep(10, \"Wait after restart\")\n self.cluster.async_failover(nodes_in_cluster,\n [node],\n graceful=False)\n self.wait_for_failover_or_assert(1)\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], [])\n CbServer.use_https = https_val\n self.wait_for_rebalance_to_complete(task)\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster,\n [], [node])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n nodes_in_cluster.remove(node)",
"def reboot(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.reboot_server(server)\n\n return r",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def test_snat_with_vrouter_agent_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_vrouter_agent()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def Restart(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Restart\", cmd)",
"def reboot_instance(InstanceId=None):\n pass",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:",
"def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)",
"def test_snat_with_kube_manager_restart(self):\n self.addCleanup(self.invalidate_kube_manager_inspect)\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_kube_manager()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))",
"async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')",
"def restart_container(self, container_node):\n try:\n print(\"Restarting container: \", container_node.get_container_id())\n container = self.docker_client.containers.get(container_node.get_container_id())\n container.restart()\n return True\n except docker.errors.APIError as de:\n print(\"Error restarting the container\")\n traceback.print_exc()\n print de\n return False",
"def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()",
"def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()",
"def cmd_pafastrestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Fast restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('fast_restart', maxRetries=5)",
"def restart_service_cmd(klass, service):\n raise NotImplementedError",
"def RestartDiscovery(self):\n payload = { \"Arg1\": self.href }\n return self._execute('restartDiscovery', payload=payload, response_object=None)",
"def restart():\n run_commands('python manage.py supervisor restart all')",
"def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret",
"def restart(self, _id):\n\n try:\n UpstartJob(_id).restart()\n except DBusException as e:\n raise ServiceOperationError(e)",
"def cmd_restart(self, app_name=None):\n rc = self.socket_command_with_project('restart', app_name)\n return rc",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartService(self):\n return self.session.request('diag/service/')",
"def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def restartInstance(self, pid, instanceName, enabled):\n if not (self.enabled and enabled):\n self.log.info(\"Restarting is disabled, please restart %s manually\" % instanceName)\n self.accounting[instanceName][\"Treatment\"] = \"Please restart it manually\"\n return S_OK(NO_RESTART)\n\n try:\n agentProc = psutil.Process(int(pid))\n processesToTerminate = agentProc.children(recursive=True)\n processesToTerminate.append(agentProc)\n\n for proc in processesToTerminate:\n proc.terminate()\n\n _gone, alive = psutil.wait_procs(processesToTerminate, timeout=5,\n callback=partial(self.on_terminate, instanceName))\n for proc in alive:\n self.log.info(\"Forcefully killing process %s\" % proc.pid)\n proc.kill()\n\n return S_OK()\n\n except psutil.Error as err:\n self.logError(\"Exception occurred in terminating processes\", \"%s\" % err)\n return S_ERROR()",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"def _restart(self):\n pass",
"def restart(self) -> None:",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def restart(self):\n\n self.stop()\n self.start()",
"def Restart(self, request, global_params=None):\n config = self.GetMethodConfig('Restart')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()",
"def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def restart(self):\n\t\treturn self.reset().start()",
"def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()",
"def restart(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Restarting `{service_name}`\")\n \n run_command(f\"sudo systemctl restart {service_name}\")",
"def service_restart(appname):\n sudo('service {} restart'.format(appname))",
"def RestartWorkers(config):\n if not config.HasCommandChannels():\n raise ConfigException(\"No URL found for sending command messages. Update \"\n \"your cluster configuration.\")\n Worker.SendKillCommand(zmq.Context(), config.command_sender,\n Worker.CMD_KILL_ERROR)\n time.sleep(1) # wait for ZMQ to flush message queues",
"def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartComponent(self, opts):\n self.stopComponent(opts)\n return self.startComponent(opts)",
"def restart(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.start()",
"def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')",
"def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()"
] |
[
"0.76114905",
"0.73184144",
"0.7275371",
"0.69063234",
"0.6566835",
"0.65363693",
"0.64630884",
"0.6208303",
"0.6188034",
"0.6188034",
"0.6180357",
"0.61680347",
"0.6119984",
"0.6103842",
"0.6042654",
"0.60374564",
"0.60140425",
"0.59943813",
"0.59744895",
"0.59729147",
"0.59718555",
"0.5942092",
"0.5930985",
"0.59227836",
"0.59178776",
"0.59118783",
"0.5868504",
"0.58555436",
"0.5838051",
"0.583542",
"0.58326286",
"0.5830489",
"0.5824759",
"0.58175",
"0.5816714",
"0.58004534",
"0.57973224",
"0.57755136",
"0.576896",
"0.5720794",
"0.5715547",
"0.571046",
"0.56996083",
"0.56990784",
"0.56904316",
"0.5684119",
"0.56836426",
"0.567977",
"0.5675429",
"0.5671437",
"0.5669973",
"0.56659603",
"0.5665165",
"0.566419",
"0.5662173",
"0.56501406",
"0.56476945",
"0.56474537",
"0.56426483",
"0.5630157",
"0.5624243",
"0.5623776",
"0.5621318",
"0.5606104",
"0.55845684",
"0.55845666",
"0.55776894",
"0.55758196",
"0.5545893",
"0.5528036",
"0.55239016",
"0.55070895",
"0.5482678",
"0.54755616",
"0.5472229",
"0.54708064",
"0.5461514",
"0.54537255",
"0.54513913",
"0.5442118",
"0.5438717",
"0.54385674",
"0.5433876",
"0.54320633",
"0.5431675",
"0.5425846",
"0.5397081",
"0.538845",
"0.5384105",
"0.5373666",
"0.5371354",
"0.5356396",
"0.5338799",
"0.5334231",
"0.5322633",
"0.5317399",
"0.5317399",
"0.52972984",
"0.52906036",
"0.52819395",
"0.52819395"
] |
0.0
|
-1
|
This operation can also be used to restart a shard or mongos node in a sharded cluster instance.
|
def restart_dbinstance(
self,
request: dds_20151201_models.RestartDBInstanceRequest,
) -> dds_20151201_models.RestartDBInstanceResponse:
runtime = util_models.RuntimeOptions()
return self.restart_dbinstance_with_options(request, runtime)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cluster_restart(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.restart(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster restart failed\")",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def restart_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Restarting cluster\"\n return ret\n\n __salt__[\"trafficserver.restart_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Restarted cluster\"\n return ret",
"def restart_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/restart').format(self.api_version,\n cluster_name,\n service_name)).json()",
"def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def cluster_reboot(cluster):\n map(reboot, cluster)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def restart(name, runas=None):\n return prlctl(\"restart\", salt.utils.data.decode(name), runas=runas)",
"def restart_kernel(self, kernel_id, now=False):",
"async def restart_node(request: web.Request) -> web.Response:\n\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n await director_v2_api.restart_dynamic_service(request.app, f\"{path_params.node_id}\")\n\n raise web.HTTPNoContent()",
"def _restart_workload(self, workload):\n self.log.info('%-20s RESTARTING', workload.name())\n workload.stop()\n workload.post_stop()\n workload.pre_start()\n workload.start()",
"def restart(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"restart\"\n run_command_with_services(context, user, remote, instance, stack, command, services)",
"def reboot(self, node):",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None):\n if ex_cloud_service_name is None:\n if node.extra is not None:\n ex_cloud_service_name = node.extra.get(\"ex_cloud_service_name\")\n\n if not ex_cloud_service_name:\n raise ValueError(\"ex_cloud_service_name is required.\")\n\n if not ex_deployment_slot:\n ex_deployment_slot = \"Production\"\n\n _deployment_name = self._get_deployment(\n service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot\n ).name\n\n try:\n response = self._perform_post(\n self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name)\n + \"/roleinstances/\"\n + _str(node.id)\n + \"?comp=reboot\",\n \"\",\n )\n\n self.raise_for_response(response, 202)\n\n if self._parse_response_for_async_op(response):\n return True\n else:\n return False\n except Exception:\n return False",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def restart(config):\n shutdown(config)\n startup(config)\n return",
"def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True",
"def supervisor_restart():\n log('restart supervisor', yellow)\n sudo('/etc/init.d/supervisor stop')\n sudo('/etc/init.d/supervisor start')\n # sudo('/etc/init.d/supervisor restart')",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()",
"def restart_salt():\n stop_salt()\n start_salt()",
"def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])",
"def cmd_pamaprestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('map_restart', maxRetries=5)",
"def restart_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"restart\", service_name])",
"def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def restart(*args, **kwargs):\n return restart_type(args, kwargs)",
"def restart(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n hass.services.call(DOMAIN, SERVICE_RESTART, data)",
"def restart_kernel(self, now=False, **kw):",
"def restart():\n stop()\n start()",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"def restart(service):\n # TODO: replace this with your relevant restart logic\n assert service.isalpha()\n run(\"service\", service, \"restart\")",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])",
"def restart_game(context: GameContext) -> None:\n left_spaceship, right_spaceship = create_spaceships()\n context.restart(left_spaceship, right_spaceship)",
"def reboot(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.reboot_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(vm_hostname, force=False, no_redefine=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_shutdown()\n vm.aws_start()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n if not vm.is_running():\n raise InvalidStateError('\"{}\" is not running'.format(vm.fqdn))\n\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n\n if not no_redefine:\n vm.hypervisor.redefine_vm(vm)\n\n vm.start()\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n log.info('\"{}\" is restarted.'.format(vm.fqdn))",
"def test_restart_node_with_encrypted_pkeys(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n rest = RestConnection(self.master)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]\n for node in self.servers[1:self.nodes_init]:\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n self.sleep(10, \"Wait after restart\")\n self.cluster.async_failover(nodes_in_cluster,\n [node],\n graceful=False)\n self.wait_for_failover_or_assert(1)\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], [])\n CbServer.use_https = https_val\n self.wait_for_rebalance_to_complete(task)\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster,\n [], [node])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n nodes_in_cluster.remove(node)",
"def reboot(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.reboot_server(server)\n\n return r",
"def Restart(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Restart\", cmd)",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def test_snat_with_vrouter_agent_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_vrouter_agent()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def reboot_instance(InstanceId=None):\n pass",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:",
"def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)",
"def test_snat_with_kube_manager_restart(self):\n self.addCleanup(self.invalidate_kube_manager_inspect)\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_kube_manager()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))",
"async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')",
"def restart_container(self, container_node):\n try:\n print(\"Restarting container: \", container_node.get_container_id())\n container = self.docker_client.containers.get(container_node.get_container_id())\n container.restart()\n return True\n except docker.errors.APIError as de:\n print(\"Error restarting the container\")\n traceback.print_exc()\n print de\n return False",
"def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()",
"def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()",
"def restart_service_cmd(klass, service):\n raise NotImplementedError",
"def cmd_pafastrestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Fast restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('fast_restart', maxRetries=5)",
"def RestartDiscovery(self):\n payload = { \"Arg1\": self.href }\n return self._execute('restartDiscovery', payload=payload, response_object=None)",
"def restart():\n run_commands('python manage.py supervisor restart all')",
"def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret",
"def restart(self, _id):\n\n try:\n UpstartJob(_id).restart()\n except DBusException as e:\n raise ServiceOperationError(e)",
"def cmd_restart(self, app_name=None):\n rc = self.socket_command_with_project('restart', app_name)\n return rc",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartService(self):\n return self.session.request('diag/service/')",
"def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def restartInstance(self, pid, instanceName, enabled):\n if not (self.enabled and enabled):\n self.log.info(\"Restarting is disabled, please restart %s manually\" % instanceName)\n self.accounting[instanceName][\"Treatment\"] = \"Please restart it manually\"\n return S_OK(NO_RESTART)\n\n try:\n agentProc = psutil.Process(int(pid))\n processesToTerminate = agentProc.children(recursive=True)\n processesToTerminate.append(agentProc)\n\n for proc in processesToTerminate:\n proc.terminate()\n\n _gone, alive = psutil.wait_procs(processesToTerminate, timeout=5,\n callback=partial(self.on_terminate, instanceName))\n for proc in alive:\n self.log.info(\"Forcefully killing process %s\" % proc.pid)\n proc.kill()\n\n return S_OK()\n\n except psutil.Error as err:\n self.logError(\"Exception occurred in terminating processes\", \"%s\" % err)\n return S_ERROR()",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"def restart(self) -> None:",
"def _restart(self):\n pass",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def Restart(self, request, global_params=None):\n config = self.GetMethodConfig('Restart')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def restart(self):\n\n self.stop()\n self.start()",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()",
"def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def restart(self):\n\t\treturn self.reset().start()",
"def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()",
"def restart(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Restarting `{service_name}`\")\n \n run_command(f\"sudo systemctl restart {service_name}\")",
"def service_restart(appname):\n sudo('service {} restart'.format(appname))",
"def RestartWorkers(config):\n if not config.HasCommandChannels():\n raise ConfigException(\"No URL found for sending command messages. Update \"\n \"your cluster configuration.\")\n Worker.SendKillCommand(zmq.Context(), config.command_sender,\n Worker.CMD_KILL_ERROR)\n time.sleep(1) # wait for ZMQ to flush message queues",
"def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartComponent(self, opts):\n self.stopComponent(opts)\n return self.startComponent(opts)",
"def restart(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.start()",
"def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')",
"def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()"
] |
[
"0.7612083",
"0.7318449",
"0.72762173",
"0.69061714",
"0.6568057",
"0.6536657",
"0.6463088",
"0.62089056",
"0.61884105",
"0.61884105",
"0.6181434",
"0.61671895",
"0.6118982",
"0.61028636",
"0.6042757",
"0.6039685",
"0.6015281",
"0.59943885",
"0.59732586",
"0.597307",
"0.5972814",
"0.5941249",
"0.5930792",
"0.5922716",
"0.59185475",
"0.5912167",
"0.58690035",
"0.5855097",
"0.58383155",
"0.5836008",
"0.58332807",
"0.5830926",
"0.5826295",
"0.58172864",
"0.58172655",
"0.5801121",
"0.57999897",
"0.57711476",
"0.5722214",
"0.57160395",
"0.5711083",
"0.57014734",
"0.5697634",
"0.56919634",
"0.5684979",
"0.56834435",
"0.56774783",
"0.567609",
"0.5670647",
"0.5669467",
"0.5666177",
"0.56652",
"0.56639975",
"0.5661974",
"0.5649557",
"0.56477904",
"0.5646907",
"0.56427264",
"0.5631139",
"0.56246156",
"0.56240886",
"0.562102",
"0.56075186",
"0.55853283",
"0.5584671",
"0.5577557",
"0.5576906",
"0.5544911",
"0.55290437",
"0.55252284",
"0.5507128",
"0.54833156",
"0.5474029",
"0.5473179",
"0.5471455",
"0.546058",
"0.5453078",
"0.5452546",
"0.54421437",
"0.5438671",
"0.5438331",
"0.54341227",
"0.5432741",
"0.5431444",
"0.54238945",
"0.539527",
"0.5389607",
"0.53845924",
"0.53734946",
"0.537272",
"0.53575194",
"0.53390723",
"0.5334618",
"0.5324424",
"0.5316839",
"0.5316839",
"0.5298243",
"0.5291261",
"0.5281668",
"0.5281668"
] |
0.5776906
|
37
|
This operation can also be used to restart a shard or mongos node in a sharded cluster instance.
|
async def restart_dbinstance_async(
self,
request: dds_20151201_models.RestartDBInstanceRequest,
) -> dds_20151201_models.RestartDBInstanceResponse:
runtime = util_models.RuntimeOptions()
return await self.restart_dbinstance_with_options_async(request, runtime)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cluster_restart(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.restart(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster restart failed\")",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def restart_cluster(name):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"\"}\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Restarting cluster\"\n return ret\n\n __salt__[\"trafficserver.restart_cluster\"]()\n\n ret[\"result\"] = True\n ret[\"comment\"] = \"Restarted cluster\"\n return ret",
"def restart_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/restart').format(self.api_version,\n cluster_name,\n service_name)).json()",
"def restart(self, **kwargs):\n return self.client.api.restart(self.id, **kwargs)",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def restart():\n require('PROJECT_NAME')\n\n sudo('supervisorctl restart {0}'.format(env.PROJECT_NAME))",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart():\n with cd(env.directory):\n sudo('./bin/supervisorctl restart all', user=env.deploy_user)",
"def restart_supervisor():\n\n require('environment', provided_by=env.environments)\n supervisor.supervisor_command('restart %(environment)s:*' % env)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def cluster_reboot(cluster):\n map(reboot, cluster)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def restart(name, runas=None):\n return prlctl(\"restart\", salt.utils.data.decode(name), runas=runas)",
"def restart_kernel(self, kernel_id, now=False):",
"async def restart_node(request: web.Request) -> web.Response:\n\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n await director_v2_api.restart_dynamic_service(request.app, f\"{path_params.node_id}\")\n\n raise web.HTTPNoContent()",
"def reboot(self, node):",
"def _restart_workload(self, workload):\n self.log.info('%-20s RESTARTING', workload.name())\n workload.stop()\n workload.post_stop()\n workload.pre_start()\n workload.start()",
"def restart(\n context,\n user=get_local_user(),\n remote=False,\n instance=None,\n stack=None,\n services=None,\n):\n command = \"restart\"\n run_command_with_services(context, user, remote, instance, stack, command, services)",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None):\n if ex_cloud_service_name is None:\n if node.extra is not None:\n ex_cloud_service_name = node.extra.get(\"ex_cloud_service_name\")\n\n if not ex_cloud_service_name:\n raise ValueError(\"ex_cloud_service_name is required.\")\n\n if not ex_deployment_slot:\n ex_deployment_slot = \"Production\"\n\n _deployment_name = self._get_deployment(\n service_name=ex_cloud_service_name, deployment_slot=ex_deployment_slot\n ).name\n\n try:\n response = self._perform_post(\n self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name)\n + \"/roleinstances/\"\n + _str(node.id)\n + \"?comp=reboot\",\n \"\",\n )\n\n self.raise_for_response(response, 202)\n\n if self._parse_response_for_async_op(response):\n return True\n else:\n return False\n except Exception:\n return False",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"def restart(config):\n shutdown(config)\n startup(config)\n return",
"def restart(self):\r\n self._update('restart')\r\n\r\n self.supervisord.options.mood = SupervisorStates.RESTARTING\r\n return True",
"def supervisor_restart():\n log('restart supervisor', yellow)\n sudo('/etc/init.d/supervisor stop')\n sudo('/etc/init.d/supervisor start')\n # sudo('/etc/init.d/supervisor restart')",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()",
"def restart_salt():\n stop_salt()\n start_salt()",
"def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])",
"def cmd_pamaprestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('map_restart', maxRetries=5)",
"def restart_service(service_name):\n subprocess.run([SUPERVISOR_CMD, \"restart\", service_name])",
"def restart_treesheets():\n # The restart command in my init.d script fails for some reason.\n # But stop and start works.\n # TODO(eob): Fix the restart init.d script.\n sudo('/etc/init.d/treesheets stop')\n sudo('/etc/init.d/treesheets start')",
"def test_snat_pod_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)\n assert self.restart_pod(client1[0])\n assert self.restart_pod(client2[0])\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def restart(*args, **kwargs):\n return restart_type(args, kwargs)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n hass.services.call(DOMAIN, SERVICE_RESTART, data)",
"def restart_kernel(self, now=False, **kw):",
"def restart():\n stop()\n start()",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"def restart(service):\n # TODO: replace this with your relevant restart logic\n assert service.isalpha()\n run(\"service\", service, \"restart\")",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])",
"def restart_game(context: GameContext) -> None:\n left_spaceship, right_spaceship = create_spaceships()\n context.restart(left_spaceship, right_spaceship)",
"def reboot(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.reboot_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def test_snat_with_nodes_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n for node in self.inputs.k8s_slave_ips:\n self.inputs.reboot(node)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(vm_hostname, force=False, no_redefine=False):\n with ExitStack() as es:\n vm = es.enter_context(_get_vm(vm_hostname))\n if vm.dataset_obj['datacenter_type'] == 'aws.dct':\n vm.aws_shutdown()\n vm.aws_start()\n elif vm.dataset_obj['datacenter_type'] == 'kvm.dct':\n _check_defined(vm)\n\n if not vm.is_running():\n raise InvalidStateError('\"{}\" is not running'.format(vm.fqdn))\n\n if force:\n vm.hypervisor.stop_vm_force(vm)\n else:\n vm.shutdown()\n\n if not no_redefine:\n vm.hypervisor.redefine_vm(vm)\n\n vm.start()\n else:\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n log.info('\"{}\" is restarted.'.format(vm.fqdn))",
"def test_restart_node_with_encrypted_pkeys(self):\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n rest = RestConnection(self.master)\n nodes_in_cluster = [node for node in self.servers[:self.nodes_init]]\n for node in self.servers[1:self.nodes_init]:\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n self.sleep(10, \"Wait after restart\")\n self.cluster.async_failover(nodes_in_cluster,\n [node],\n graceful=False)\n self.wait_for_failover_or_assert(1)\n rest.set_recovery_type(\"ns_1@\" + node.ip, recoveryType=\"delta\")\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster, [], [])\n CbServer.use_https = https_val\n self.wait_for_rebalance_to_complete(task)\n shell = RemoteMachineShellConnection(node)\n shell.restart_couchbase()\n shell.disconnect()\n https_val = CbServer.use_https # so that add_node uses https\n CbServer.use_https = True\n task = self.cluster.async_rebalance(nodes_in_cluster,\n [], [node])\n self.wait_for_rebalance_to_complete(task)\n CbServer.use_https = https_val\n nodes_in_cluster.remove(node)",
"def reboot(self, name=None):\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.reboot_server(server)\n\n return r",
"def Restart(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Restart\", cmd)",
"def test_snat_with_vrouter_agent_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_vrouter_agent()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def reboot_instance(InstanceId=None):\n pass",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def vm_restart(self, params: dict) -> Tuple[\"Status\", dict]:",
"def resize_cluster(ctx, project_name, cluster_name, instance_size_name):\n project = ctx.obj.groups.byName[project_name].get().data\n\n new_cluster_config = {\n 'clusterType': 'REPLICASET',\n 'providerSettings': {\n 'providerName': 'AWS',\n 'regionName': 'US_WEST_1',\n 'instanceSizeName': instance_size_name}}\n\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].patch(\n **new_cluster_config)\n pprint(cluster.data)",
"def test_snat_with_kube_manager_restart(self):\n self.addCleanup(self.invalidate_kube_manager_inspect)\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n #perform the kube manager restart\n self.restart_kube_manager()\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(verbose=False, force=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)",
"async def reboot(self, ctx):\n restart_land = discord.Embed(\n title=\"Restarting\", description=\"Please wait...\", colour=0x690E8\n )\n re_msg = await ctx.send(embed=restart_land)\n pm2_id = os.environ.get(\"pm_id\")\n if_systemd = os.environ.get(\"systemd_supervised\")\n if pm2_id:\n await re_msg.edit(content=\"pm2: :wave: bye!\")\n await self.bot.session.close()\n await self.bot.logout()\n await run_cmd(f\"pm2 restart {pm2_id}\")\n elif if_systemd:\n await re_msg.edit(content=\"systemd: :wave: bye!\")\n await self.bot.session.close()\n await run_cmd(\"systemctl --user restart lolbot\")\n await self.bot.logout()\n else:\n await re_msg.edit(content=\":warning: No supervisor; invoking\" \" `shutdown`\")\n await ctx.invoke(self.bot.get_command(\"shutdown\"))",
"async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')",
"def restart_container(self, container_node):\n try:\n print(\"Restarting container: \", container_node.get_container_id())\n container = self.docker_client.containers.get(container_node.get_container_id())\n container.restart()\n return True\n except docker.errors.APIError as de:\n print(\"Error restarting the container\")\n traceback.print_exc()\n print de\n return False",
"def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()",
"def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()",
"def cmd_pafastrestart(self, data, client, cmd):\n if self._isranked:\n client.message('You can\\'t restart a map on ranked servers')\n else:\n self.console.say('Fast restarting map in 2 seconds...')\n time.sleep(2)\n self.console.write('fast_restart', maxRetries=5)",
"def restart_service_cmd(klass, service):\n raise NotImplementedError",
"def RestartDiscovery(self):\n payload = { \"Arg1\": self.href }\n return self._execute('restartDiscovery', payload=payload, response_object=None)",
"def restart():\n run_commands('python manage.py supervisor restart all')",
"def restart(name):\n ret = \"restart False\"\n if stop(name) and start(name):\n ret = \"restart True\"\n return ret",
"def restart(self, _id):\n\n try:\n UpstartJob(_id).restart()\n except DBusException as e:\n raise ServiceOperationError(e)",
"def cmd_restart(self, app_name=None):\n rc = self.socket_command_with_project('restart', app_name)\n return rc",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartService(self):\n return self.session.request('diag/service/')",
"def restart(verbose=False, force=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('restart')\n _validate_force(force, 'restart')\n\n stop(verbose, force)\n start(verbose)\n _print_time()",
"def replace_with_reset_resume_state_test(self):\n\n cluster = self.cluster\n cluster.populate(3).start()\n node1, node2, node3 = cluster.nodelist()\n\n node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)'])\n\n session = self.patient_cql_connection(node1)\n stress_table = 'keyspace1.standard1'\n query = SimpleStatement('select * from %s LIMIT 1' % stress_table, consistency_level=ConsistencyLevel.THREE)\n initial_data = rows_to_list(session.execute(query))\n\n node3.stop(gently=False)\n\n # kill node1 in the middle of streaming to let it fail\n t = InterruptBootstrap(node1)\n t.start()\n # replace node 3 with node 4\n debug(\"Starting node 4 to replace node 3\")\n node4 = Node('node4', cluster=cluster, auto_bootstrap=True, thrift_interface=('127.0.0.4', 9160),\n storage_interface=('127.0.0.4', 7000), jmx_port='7400', remote_debug_port='0',\n initial_token=None, binary_interface=('127.0.0.4', 9042))\n\n # keep timeout low so that test won't hang\n node4.set_configuration_options(values={'streaming_socket_timeout_in_ms': 1000})\n cluster.add(node4, False)\n try:\n node4.start(jvm_args=[\"-Dcassandra.replace_address_first_boot=127.0.0.3\"], wait_other_notice=False)\n except NodeError:\n pass # node doesn't start as expected\n t.join()\n node1.start()\n\n # restart node4 bootstrap with resetting bootstrap state\n node4.stop()\n mark = node4.mark_log()\n node4.start(jvm_args=[\n \"-Dcassandra.replace_address_first_boot=127.0.0.3\",\n \"-Dcassandra.reset_bootstrap_progress=true\"\n ])\n # check if we reset bootstrap state\n node4.watch_log_for(\"Resetting bootstrap progress to start fresh\", from_mark=mark)\n # wait for node3 ready to query\n node4.watch_log_for(\"Listening for thrift clients...\", from_mark=mark)\n\n # check if 2nd bootstrap succeeded\n assert_bootstrap_state(self, node4, 'COMPLETED')\n\n # query should work again\n debug(\"Stopping old nodes\")\n node1.stop(gently=False, wait_other_notice=True)\n node2.stop(gently=False, wait_other_notice=True)\n\n debug(\"Verifying data on new node.\")\n session = self.patient_exclusive_cql_connection(node4)\n assert_all(session, 'SELECT * from {} LIMIT 1'.format(stress_table),\n expected=initial_data,\n cl=ConsistencyLevel.ONE)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def restartInstance(self, pid, instanceName, enabled):\n if not (self.enabled and enabled):\n self.log.info(\"Restarting is disabled, please restart %s manually\" % instanceName)\n self.accounting[instanceName][\"Treatment\"] = \"Please restart it manually\"\n return S_OK(NO_RESTART)\n\n try:\n agentProc = psutil.Process(int(pid))\n processesToTerminate = agentProc.children(recursive=True)\n processesToTerminate.append(agentProc)\n\n for proc in processesToTerminate:\n proc.terminate()\n\n _gone, alive = psutil.wait_procs(processesToTerminate, timeout=5,\n callback=partial(self.on_terminate, instanceName))\n for proc in alive:\n self.log.info(\"Forcefully killing process %s\" % proc.pid)\n proc.kill()\n\n return S_OK()\n\n except psutil.Error as err:\n self.logError(\"Exception occurred in terminating processes\", \"%s\" % err)\n return S_ERROR()",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"def restart(self) -> None:",
"def _restart(self):\n pass",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def Restart(self, request, global_params=None):\n config = self.GetMethodConfig('Restart')\n return self._RunMethod(\n config, request, global_params=global_params)",
"def restart(self):\n\n self.stop()\n self.start()",
"def test_05_node_down_and_resync_hard(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n test_rest.db_simulate(cluster, 240)\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port} - during load')\n test_rest.docker_stop(cluster, port)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n test_rest.cluster.verify_data()",
"def test_04_node_down_and_resync_soft(self):\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'stopping cluster {cluster} node with port {port}')\n test_rest.docker_stop(cluster, port)\n test_rest.step(f\"starting db_simulator on cluster {cluster}\")\n test_rest.db_simulate(cluster, 180)\n # restart nodes\n for cluster in test_rest.cluster.clusters:\n if cluster == 'index':\n continue\n port = test_rest.cluster.clusters[cluster][0][0]\n test_rest.step(f'restarting cluster {cluster} node with port {port}')\n test_rest.expand_data_cluster(cluster, port=port)\n test_rest.step(\"restarted nodes, waiting 10 seconds to begin monitoring table state & running sync jobs\")\n time.sleep(10)\n test_rest.cluster.insync_and_state_check()\n while test_rest.is_running_simulations():\n print(\"waiting on running simulations to complete\")\n time.sleep(10)\n test_rest.cluster.verify_data()",
"def reboot():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><restart><system></system></restart></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def restart(self):\n\t\treturn self.reset().start()",
"def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()",
"def restart(service_name: str, print_action: bool = True):\n\n if print_action:\n print_log_status(3, f\"Restarting `{service_name}`\")\n \n run_command(f\"sudo systemctl restart {service_name}\")",
"def service_restart(appname):\n sudo('service {} restart'.format(appname))",
"def RestartWorkers(config):\n if not config.HasCommandChannels():\n raise ConfigException(\"No URL found for sending command messages. Update \"\n \"your cluster configuration.\")\n Worker.SendKillCommand(zmq.Context(), config.command_sender,\n Worker.CMD_KILL_ERROR)\n time.sleep(1) # wait for ZMQ to flush message queues",
"def test_snat_with_docker_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"docker\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30)\n cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()\n assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (\n error_nodes)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restartComponent(self, opts):\n self.stopComponent(opts)\n return self.startComponent(opts)",
"def restart(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.start()",
"def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')",
"def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()",
"def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()"
] |
[
"0.7611809",
"0.7319289",
"0.72763574",
"0.6905782",
"0.65671515",
"0.6536635",
"0.6462853",
"0.62082076",
"0.61881244",
"0.61881244",
"0.6181432",
"0.6168009",
"0.612062",
"0.6102914",
"0.60427433",
"0.60393894",
"0.6015079",
"0.59957445",
"0.59742093",
"0.59734917",
"0.59728605",
"0.5940834",
"0.5931537",
"0.5922823",
"0.5917834",
"0.5911281",
"0.58690774",
"0.5855625",
"0.5839076",
"0.5835775",
"0.5832905",
"0.58314794",
"0.5825886",
"0.58170456",
"0.5817043",
"0.5800561",
"0.5800272",
"0.5777159",
"0.5770701",
"0.5722113",
"0.5715851",
"0.57118446",
"0.57006425",
"0.56984156",
"0.56916547",
"0.56856173",
"0.568421",
"0.56780267",
"0.5676698",
"0.5671826",
"0.5670055",
"0.5666855",
"0.56639576",
"0.5663669",
"0.5661989",
"0.56493866",
"0.5647824",
"0.5645852",
"0.56424034",
"0.56308955",
"0.56245726",
"0.56241894",
"0.56218606",
"0.56090534",
"0.5586154",
"0.55852306",
"0.55772376",
"0.55769986",
"0.5544845",
"0.5528509",
"0.552504",
"0.5506963",
"0.54833484",
"0.5473721",
"0.5472826",
"0.54717773",
"0.54602003",
"0.5454702",
"0.5452446",
"0.5442306",
"0.5438548",
"0.54380476",
"0.5434177",
"0.54332733",
"0.5431759",
"0.5424457",
"0.5395866",
"0.5389842",
"0.5384802",
"0.53744954",
"0.5372317",
"0.53567874",
"0.533754",
"0.53330195",
"0.5324159",
"0.5317013",
"0.5317013",
"0.52975047",
"0.5290092",
"0.528166",
"0.528166"
] |
0.0
|
-1
|
The instance must be in the running state when you call this operation. > This operation is applicable to replica set instances and sharded cluster instances, but cannot be performed on standalone instances. For a replica set instance, the switchover is performed between the nodes of the instance. For a sharded cluster instance, the switchover is performed between the nodes of a shard.
|
def switch_dbinstance_hawith_options(
self,
request: dds_20151201_models.SwitchDBInstanceHARequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.SwitchDBInstanceHAResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.node_id):
query['NodeId'] = request.node_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.role_ids):
query['RoleIds'] = request.role_ids
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.switch_mode):
query['SwitchMode'] = request.switch_mode
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='SwitchDBInstanceHA',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.SwitchDBInstanceHAResponse(),
self.call_api(params, req, runtime)
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reboot_instance(InstanceId=None):\n pass",
"def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'",
"def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id",
"def start_instance(InstanceId=None):\n pass",
"def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')",
"def test_specific_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n compute1 = self.start_service('compute', host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='1')\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)",
"def lock_instance(vm_id):\n cli.openstack(cmd='server lock', positional_args=vm_id)",
"def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)",
"def test_lock_instance(self, instance, instances_steps):\n instances_steps.lock_instance(instance.name)\n instances_steps.unlock_instance(instance.name)",
"def _maybe_restart_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Compute service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n # Do nothing if the status is not RUNNING to avoid race. This will cover\n # most of the cases.\n if status == COMPUTE_STATUS_RUNNING:\n logging.info('Stopping GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().stop(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n enqueue_start_task(instance, zone)",
"def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance",
"def __switchToPrimary(self, datadir):\n cmd = ClusterCommand.getSwitchOverCmd(self.user, self.dbNodeInfo.id, datadir)\n self.logger.debug(\"Switch to primary: %s\" % cmd)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Switch instance to be primary failed!Datadir %s.\\nOutput: %s\" % (datadir, output))",
"def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])",
"def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)",
"def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def start_notebook_instance(NotebookInstanceName=None):\n pass",
"def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass",
"def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)",
"def SSHToInstance(self, args, instance):\n args = self._DefaultArgsForSSH(args)\n\n external_nat = ssh_utils.GetExternalIPAddress(instance)\n log.status.Print(\n 'Trying to SSH to VM with NAT IP:{}'.format(external_nat))\n args.ssh_key_file = ssh.Keys.DEFAULT_KEY_FILE\n\n ssh_helper = ssh_utils.BaseSSHCLIHelper()\n ssh_helper.Run(args)\n identity_file = ssh_helper.keys.key_file\n\n user, _ = ssh_utils.GetUserAndInstance(args.name)\n host_keys = self._GetHostKeyFromInstance(args.zone, ssh_helper, instance)\n options = self._GetSSHOptions(args.name, ssh_helper,\n instance, host_keys)\n\n public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)\n oslogin_state = ssh.GetOsloginState(\n instance,\n ssh_helper.GetProject(\n self.client, properties.VALUES.core.project.Get(required=True)),\n user,\n public_key,\n None,\n self.release_track,\n username_requested=False,\n messages=self.client.messages)\n user = oslogin_state.user\n\n remote = ssh.Remote(external_nat, user)\n if not oslogin_state.oslogin_enabled:\n self._WaitForSSHKeysToPropagate(ssh_helper, remote, identity_file, user,\n instance, options)\n\n extra_flags = []\n # Ctpu seems to be forwarding some other ports on what\n # seems like the TPU node. Need to understand better before enabling.\n if args.forward_ports:\n extra_flags.extend(\n ['-A', '-L', '6006:localhost:6006', '-L', '8888:localhost:8888'])\n ssh_cmd_args = {\n 'remote': remote,\n 'identity_file': identity_file,\n 'options': options,\n 'extra_flags': extra_flags\n }\n\n cmd = ssh.SSHCommand(**ssh_cmd_args)\n max_attempts = 10\n sleep_interval = 30\n # Since the instance was just created, it can take a while for the instance\n # to be ready to accept ssh connections, therefore retry up to 5m. Doesn't\n # need to be backed off, regular interval retry is sufficient since we\n # aren't looking to throttle.\n for i in range(max_attempts):\n try:\n log.status.Print('SSH Attempt #{}...'.format(i))\n # Errors from SSH itself result in an ssh.CommandError being raised\n return_code = cmd.Run(\n ssh_helper.env,\n putty_force_connect=properties.VALUES.ssh.putty_force_connect.GetBool())\n if return_code:\n # This is the return code of the remote command.\n # Problems with SSH itself will result in ssh.CommandError\n # being raised above.\n sys.exit(return_code)\n except ssh.CommandError as e:\n if i == max_attempts - 1:\n raise e\n log.status.Print(\n 'Retrying: SSH command error: {}'.format(six.text_type(e)))\n time.sleep(sleep_interval)\n continue\n break",
"def spawn(self, context, instance,\n network_info=None, block_device_info=None):\n LOG.debug(\"spawn\")\n\n instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance[\"availability_zone\"])\n\n # update instances table\n bmm, reuse = self._select_machine(context, instance)\n instance[\"display_name\"] = bmm[\"name\"]\n instance[\"availability_zone\"] = instance_zone\n db.instance_update(context, \n instance[\"id\"], \n {\"display_name\": bmm[\"name\"],\n \"availability_zone\": instance_zone})\n if vlan_id:\n db.bmm_update(context, bmm[\"id\"], {\"availability_zone\": cluster_name, \n \"vlan_id\": vlan_id,\n \"service_ip\": None})\n \n if instance_zone == \"resource_pool\":\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n else: \n self._update_ofc(bmm, cluster_name)\n if bmm[\"instance_id\"]:\n db.instance_destroy(context, bmm[\"instance_id\"])\n\n if reuse:\n db.bmm_update(context, bmm[\"id\"], {\"status\": \"used\", \n \"instance_id\": instance[\"id\"]}) \n else:\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n \n if instance[\"key_data\"]:\n self._inject_key(bmm[\"pxe_ip\"], str(instance[\"key_data\"]))",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def power_on(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.start()\n self.instance_waiter.wait(instance, self.instance_waiter.RUNNING)\n return True",
"def test_specific_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='2')\n\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, instance_id2)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")",
"def set_instance(self, env, instance, modify_existing):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.set_instance()' % self.__class__.__name__)\n # TODO create or modify the instance\n raise pywbem.CIMError(pywbem.CIM_ERR_NOT_SUPPORTED) # Remove to implement\n return instance",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)",
"def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response",
"def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")",
"def test_update_instances_schedule_state(self):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def _maybe_start_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Unable to start Compute instance, service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n if status == COMPUTE_STATUS_TERMINATED:\n logging.info('Starting GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().start(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n\n if status != COMPUTE_STATUS_RUNNING:\n # If in an intermediate state: PROVISIONING, STAGING, STOPPING, requeue\n # the task to check back later. If in TERMINATED state, also requeue the\n # task since the start attempt may fail and we should retry.\n enqueue_start_task(instance, zone)",
"def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def test_view_instance(self, instance, instances_steps):\n instances_steps.view_instance(instance.name)",
"def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()",
"def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")",
"def get_instance(instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:\n __args__ = dict()\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:sqladmin/v1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value\n\n return AwaitableGetInstanceResult(\n available_maintenance_versions=pulumi.get(__ret__, 'available_maintenance_versions'),\n backend_type=pulumi.get(__ret__, 'backend_type'),\n connection_name=pulumi.get(__ret__, 'connection_name'),\n create_time=pulumi.get(__ret__, 'create_time'),\n current_disk_size=pulumi.get(__ret__, 'current_disk_size'),\n database_installed_version=pulumi.get(__ret__, 'database_installed_version'),\n database_version=pulumi.get(__ret__, 'database_version'),\n disk_encryption_configuration=pulumi.get(__ret__, 'disk_encryption_configuration'),\n disk_encryption_status=pulumi.get(__ret__, 'disk_encryption_status'),\n etag=pulumi.get(__ret__, 'etag'),\n failover_replica=pulumi.get(__ret__, 'failover_replica'),\n gce_zone=pulumi.get(__ret__, 'gce_zone'),\n instance_type=pulumi.get(__ret__, 'instance_type'),\n ip_addresses=pulumi.get(__ret__, 'ip_addresses'),\n ipv6_address=pulumi.get(__ret__, 'ipv6_address'),\n kind=pulumi.get(__ret__, 'kind'),\n maintenance_version=pulumi.get(__ret__, 'maintenance_version'),\n master_instance_name=pulumi.get(__ret__, 'master_instance_name'),\n max_disk_size=pulumi.get(__ret__, 'max_disk_size'),\n name=pulumi.get(__ret__, 'name'),\n on_premises_configuration=pulumi.get(__ret__, 'on_premises_configuration'),\n out_of_disk_report=pulumi.get(__ret__, 'out_of_disk_report'),\n project=pulumi.get(__ret__, 'project'),\n region=pulumi.get(__ret__, 'region'),\n replica_configuration=pulumi.get(__ret__, 'replica_configuration'),\n replica_names=pulumi.get(__ret__, 'replica_names'),\n root_password=pulumi.get(__ret__, 'root_password'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n scheduled_maintenance=pulumi.get(__ret__, 'scheduled_maintenance'),\n secondary_gce_zone=pulumi.get(__ret__, 'secondary_gce_zone'),\n self_link=pulumi.get(__ret__, 'self_link'),\n server_ca_cert=pulumi.get(__ret__, 'server_ca_cert'),\n service_account_email_address=pulumi.get(__ret__, 'service_account_email_address'),\n settings=pulumi.get(__ret__, 'settings'),\n state=pulumi.get(__ret__, 'state'),\n suspension_reason=pulumi.get(__ret__, 'suspension_reason'))",
"def power_on(self, context, instance, network_info, block_device_info):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.start_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def stop_instance(InstanceId=None, Force=None):\n pass",
"def toggle_server(self):\n name = request.params.get('name', g.DEFAULT_SERVER)\n log.debug('toggle_server(%s)' % name)\n servers = model.Session.query(model.Server)\n server = servers.filter(model.Server.name == name).one()\n server.server_on = not server.server_on\n model.Session.update(server)\n model.Session.commit()\n redirect_to('/admin/dashboard')",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')",
"def _is_instance_running(settings, instance_id_ip,\n ip_given=False):\n instance = get_this_instance(settings,\n instance_id_ip, ip_given)\n if instance:\n if ip_given:\n ip_address = instance_id_ip\n else:\n ip_address = get_instance_ip(instance)\n state = instance.state\n print 'Current status of Instance'\\\n ' with IP [%s]: %s' %(ip_address, state)\n if state == \"running\" and ip_address:\n return True\n return False",
"def assign_instance(InstanceId=None, LayerIds=None):\n pass",
"def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))",
"def bounce_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\")\n else:\n cmd = _traffic_line(\"-B\")\n\n return _subprocess(cmd)",
"def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")",
"def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()",
"def RunStop(self, zone=None):\n if zone is None:\n zone = self.zone\n try:\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n except Exception, e:\n self.RegisterImage()\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n if not self.volume_1:\n self.volume_1 = self.tester.ec2.create_volume(zone=self.zone, size=2)\n if not self.volume_2:\n self.volume_2 = self.tester.ec2.create_volume(zone=self.zone, size=1)\n\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.reservation = self.tester.ec2.run_image(**self.run_instance_params)\n ## Ensure that we can attach and use a volume\n for instance in self.reservation.instances:\n instance.attach_volume(self.volume_1, self.test_volume_1_path)\n instance.attach_volume(self.volume_2, self.test_volume_2_path)\n self.tester.ec2.stop_instances(self.reservation)\n for instance in self.reservation.instances:\n if instance.ip_address or instance.private_ip_address:\n raise Exception(\"Instance had a public \" + str(instance.ip_address) + \" private \" + str(instance.private_ip_address) )\n if instance.block_device_mapping[self.test_volume_1_path] is None:\n raise Exception(\"DBM path is invalid\")\n if self.volume_1.id != instance.block_device_mapping[self.test_volume_1_path].volume_id:\n raise Exception(\"Volume id does not match\")",
"def start_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Start an instance\n response = ec2_resource.Instance(instance_id).start(DryRun=False)\n print(response)\n print(\"\\nSuccessfully starting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def test_least_busy_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, s_ref['id'])\n db.instance_destroy(self.context, s_ref2['id'])",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise",
"def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)",
"def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))",
"def view_instance(self, instance_name, check=True):\n self.page_instances().table_instances.row(\n name=instance_name).link_instance.click()\n\n if check:\n assert self.app.page_instance.info_instance.label_name.value \\\n == instance_name",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")",
"def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()",
"def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]",
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()",
"def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)",
"def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance",
"def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")",
"def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"",
"def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)",
"def start_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n print(\"\\n===Creating EC2 instance.\")\n ec2_client.start_instances(InstanceIds=instances_ids)\n \n # wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"===EC2 instance is ready!\")",
"def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id",
"def nfvi_cold_migrate_instance(instance_uuid, callback, to_host_name=None,\n context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n return cmd_id",
"def _ensureComponentRunning(self, shouldBeRunning):\n for instance in shouldBeRunning:\n self.log.info(\"Starting instance %s\" % instance)\n system, name = instance.split('__')\n if self.controlComponents:\n res = self.sysAdminClient.startComponent(system, name)\n if not res['OK']:\n self.logError(\"Failed to start component:\", \"%s: %s\" % (instance, res['Message']))\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance was down, started instance\"\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance is down, should be started\"",
"def test_launch_volume_as_instance(self, volume, instances_steps,\n volumes_steps):\n instance_name = next(generate_ids('instance'))\n volumes_steps.launch_volume_as_instance(\n volume.name, instance_name, network_name=INTERNAL_NETWORK_NAME)\n\n instances_steps.page_instances().table_instances.row(\n name=instance_name).wait_for_status('Active')\n instances_steps.delete_instance(instance_name)",
"def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))",
"def test_run_terminate_no_image(self):\n params = {'image_ref': ''}\n instance = self._create_fake_instance_obj(params)\n self.compute.build_and_run_instance(self.context, instance, {}, {}, {},\n [], block_device_mapping=[])\n self._assert_state({'vm_state': vm_states.ACTIVE,\n 'task_state': None})\n\n self.compute.terminate_instance(self.context, instance, [])\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 0)",
"def power_off(self, instance, node=None):\n if not node:\n node = _get_baremetal_node_by_instance_uuid(instance['uuid'])\n pm = get_power_manager(node=node, instance=instance)\n pm.deactivate_node()\n if pm.state != baremetal_states.DELETED:\n raise exception.InstancePowerOffFailure(_(\n \"Baremetal power manager failed to stop node \"\n \"for instance %r\") % instance['uuid'])\n pm.stop_console()",
"def terminateInstance(region,zone,instance_id):\n\ttry:\n\t\tec2 = boto.ec2.connect_to_region(region+'-'+zone)\n\t\tec2.terminate_instances(instance_ids=[instance_id])\n\t\treturn True\n\texcept Exception as e:\n\t\tlogError(e)\n\t\treturn False",
"def lock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_lock.click()\n\n if check:\n self.close_notification('success')",
"def create_instance(self, nova, image_name, instance_name, flavor):\n image = nova.images.find(name=image_name)\n flavor = nova.flavors.find(name=flavor)\n instance = nova.servers.create(name=instance_name, image=image,\n flavor=flavor)\n\n count = 1\n status = instance.status\n while status != 'ACTIVE' and count < 60:\n time.sleep(3)\n instance = nova.servers.get(instance.id)\n status = instance.status\n self.log.debug('instance status: {}'.format(status))\n count += 1\n\n if status != 'ACTIVE':\n self.log.error('instance creation timed out')\n return None\n\n return instance",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2"
] |
[
"0.61040485",
"0.6096324",
"0.60282654",
"0.58379394",
"0.58264416",
"0.5758901",
"0.57290405",
"0.56599015",
"0.56398165",
"0.55757517",
"0.55595875",
"0.55456",
"0.5543997",
"0.5541205",
"0.5492753",
"0.5477153",
"0.54576474",
"0.54539895",
"0.54452765",
"0.5433373",
"0.5427934",
"0.54262674",
"0.5394101",
"0.53921956",
"0.53793555",
"0.5374121",
"0.53694385",
"0.5356032",
"0.5355845",
"0.5334702",
"0.531998",
"0.5316969",
"0.53069645",
"0.5305549",
"0.5303321",
"0.52937686",
"0.5276283",
"0.5269501",
"0.52649236",
"0.5262889",
"0.525724",
"0.52433527",
"0.52421004",
"0.52251714",
"0.5224862",
"0.5213376",
"0.5189451",
"0.5187047",
"0.51805925",
"0.5180383",
"0.51763123",
"0.51650786",
"0.51582605",
"0.51564556",
"0.5142721",
"0.5141481",
"0.513841",
"0.5136372",
"0.5131935",
"0.51271975",
"0.5120529",
"0.51199496",
"0.5117998",
"0.5117303",
"0.50995845",
"0.5099351",
"0.5097509",
"0.50974715",
"0.50796443",
"0.50686026",
"0.5065488",
"0.50626326",
"0.5055086",
"0.5055079",
"0.5047959",
"0.50378907",
"0.50366485",
"0.5028522",
"0.50255674",
"0.5016331",
"0.50097466",
"0.5007584",
"0.5006492",
"0.5005757",
"0.4992329",
"0.49847522",
"0.49704602",
"0.49665394",
"0.49637017",
"0.4962652",
"0.49552613",
"0.49530962",
"0.4949279",
"0.49383482",
"0.49379858",
"0.4935344",
"0.49324194",
"0.49296668",
"0.4925811",
"0.49148324",
"0.49076656"
] |
0.0
|
-1
|
The instance must be running when you call this operation. > This operation is applicable to replica set instances and sharded cluster instances, but cannot be performed on standalone instances. On replica set instances, the switch is performed between instances. On sharded cluster instances, the switch is performed between shards.
|
async def switch_dbinstance_hawith_options_async(
self,
request: dds_20151201_models.SwitchDBInstanceHARequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.SwitchDBInstanceHAResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.node_id):
query['NodeId'] = request.node_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.role_ids):
query['RoleIds'] = request.role_ids
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
if not UtilClient.is_unset(request.switch_mode):
query['SwitchMode'] = request.switch_mode
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='SwitchDBInstanceHA',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.SwitchDBInstanceHAResponse(),
await self.call_api_async(params, req, runtime)
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reboot_instance(InstanceId=None):\n pass",
"def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'",
"def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id",
"def start_instance(InstanceId=None):\n pass",
"def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')",
"def test_specific_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n compute1 = self.start_service('compute', host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='1')\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)",
"def lock_instance(vm_id):\n cli.openstack(cmd='server lock', positional_args=vm_id)",
"def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)",
"def test_lock_instance(self, instance, instances_steps):\n instances_steps.lock_instance(instance.name)\n instances_steps.unlock_instance(instance.name)",
"def _maybe_restart_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Compute service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n # Do nothing if the status is not RUNNING to avoid race. This will cover\n # most of the cases.\n if status == COMPUTE_STATUS_RUNNING:\n logging.info('Stopping GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().stop(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n enqueue_start_task(instance, zone)",
"def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance",
"def __switchToPrimary(self, datadir):\n cmd = ClusterCommand.getSwitchOverCmd(self.user, self.dbNodeInfo.id, datadir)\n self.logger.debug(\"Switch to primary: %s\" % cmd)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Switch instance to be primary failed!Datadir %s.\\nOutput: %s\" % (datadir, output))",
"def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])",
"def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)",
"def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def start_notebook_instance(NotebookInstanceName=None):\n pass",
"def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass",
"def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)",
"def spawn(self, context, instance,\n network_info=None, block_device_info=None):\n LOG.debug(\"spawn\")\n\n instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance[\"availability_zone\"])\n\n # update instances table\n bmm, reuse = self._select_machine(context, instance)\n instance[\"display_name\"] = bmm[\"name\"]\n instance[\"availability_zone\"] = instance_zone\n db.instance_update(context, \n instance[\"id\"], \n {\"display_name\": bmm[\"name\"],\n \"availability_zone\": instance_zone})\n if vlan_id:\n db.bmm_update(context, bmm[\"id\"], {\"availability_zone\": cluster_name, \n \"vlan_id\": vlan_id,\n \"service_ip\": None})\n \n if instance_zone == \"resource_pool\":\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n else: \n self._update_ofc(bmm, cluster_name)\n if bmm[\"instance_id\"]:\n db.instance_destroy(context, bmm[\"instance_id\"])\n\n if reuse:\n db.bmm_update(context, bmm[\"id\"], {\"status\": \"used\", \n \"instance_id\": instance[\"id\"]}) \n else:\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n \n if instance[\"key_data\"]:\n self._inject_key(bmm[\"pxe_ip\"], str(instance[\"key_data\"]))",
"def SSHToInstance(self, args, instance):\n args = self._DefaultArgsForSSH(args)\n\n external_nat = ssh_utils.GetExternalIPAddress(instance)\n log.status.Print(\n 'Trying to SSH to VM with NAT IP:{}'.format(external_nat))\n args.ssh_key_file = ssh.Keys.DEFAULT_KEY_FILE\n\n ssh_helper = ssh_utils.BaseSSHCLIHelper()\n ssh_helper.Run(args)\n identity_file = ssh_helper.keys.key_file\n\n user, _ = ssh_utils.GetUserAndInstance(args.name)\n host_keys = self._GetHostKeyFromInstance(args.zone, ssh_helper, instance)\n options = self._GetSSHOptions(args.name, ssh_helper,\n instance, host_keys)\n\n public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)\n oslogin_state = ssh.GetOsloginState(\n instance,\n ssh_helper.GetProject(\n self.client, properties.VALUES.core.project.Get(required=True)),\n user,\n public_key,\n None,\n self.release_track,\n username_requested=False,\n messages=self.client.messages)\n user = oslogin_state.user\n\n remote = ssh.Remote(external_nat, user)\n if not oslogin_state.oslogin_enabled:\n self._WaitForSSHKeysToPropagate(ssh_helper, remote, identity_file, user,\n instance, options)\n\n extra_flags = []\n # Ctpu seems to be forwarding some other ports on what\n # seems like the TPU node. Need to understand better before enabling.\n if args.forward_ports:\n extra_flags.extend(\n ['-A', '-L', '6006:localhost:6006', '-L', '8888:localhost:8888'])\n ssh_cmd_args = {\n 'remote': remote,\n 'identity_file': identity_file,\n 'options': options,\n 'extra_flags': extra_flags\n }\n\n cmd = ssh.SSHCommand(**ssh_cmd_args)\n max_attempts = 10\n sleep_interval = 30\n # Since the instance was just created, it can take a while for the instance\n # to be ready to accept ssh connections, therefore retry up to 5m. Doesn't\n # need to be backed off, regular interval retry is sufficient since we\n # aren't looking to throttle.\n for i in range(max_attempts):\n try:\n log.status.Print('SSH Attempt #{}...'.format(i))\n # Errors from SSH itself result in an ssh.CommandError being raised\n return_code = cmd.Run(\n ssh_helper.env,\n putty_force_connect=properties.VALUES.ssh.putty_force_connect.GetBool())\n if return_code:\n # This is the return code of the remote command.\n # Problems with SSH itself will result in ssh.CommandError\n # being raised above.\n sys.exit(return_code)\n except ssh.CommandError as e:\n if i == max_attempts - 1:\n raise e\n log.status.Print(\n 'Retrying: SSH command error: {}'.format(six.text_type(e)))\n time.sleep(sleep_interval)\n continue\n break",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def power_on(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.start()\n self.instance_waiter.wait(instance, self.instance_waiter.RUNNING)\n return True",
"def test_specific_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='2')\n\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, instance_id2)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def set_instance(self, env, instance, modify_existing):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.set_instance()' % self.__class__.__name__)\n # TODO create or modify the instance\n raise pywbem.CIMError(pywbem.CIM_ERR_NOT_SUPPORTED) # Remove to implement\n return instance",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)",
"def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response",
"def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")",
"def test_update_instances_schedule_state(self):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def _maybe_start_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Unable to start Compute instance, service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n if status == COMPUTE_STATUS_TERMINATED:\n logging.info('Starting GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().start(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n\n if status != COMPUTE_STATUS_RUNNING:\n # If in an intermediate state: PROVISIONING, STAGING, STOPPING, requeue\n # the task to check back later. If in TERMINATED state, also requeue the\n # task since the start attempt may fail and we should retry.\n enqueue_start_task(instance, zone)",
"def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def test_view_instance(self, instance, instances_steps):\n instances_steps.view_instance(instance.name)",
"def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()",
"def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")",
"def get_instance(instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:\n __args__ = dict()\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:sqladmin/v1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value\n\n return AwaitableGetInstanceResult(\n available_maintenance_versions=pulumi.get(__ret__, 'available_maintenance_versions'),\n backend_type=pulumi.get(__ret__, 'backend_type'),\n connection_name=pulumi.get(__ret__, 'connection_name'),\n create_time=pulumi.get(__ret__, 'create_time'),\n current_disk_size=pulumi.get(__ret__, 'current_disk_size'),\n database_installed_version=pulumi.get(__ret__, 'database_installed_version'),\n database_version=pulumi.get(__ret__, 'database_version'),\n disk_encryption_configuration=pulumi.get(__ret__, 'disk_encryption_configuration'),\n disk_encryption_status=pulumi.get(__ret__, 'disk_encryption_status'),\n etag=pulumi.get(__ret__, 'etag'),\n failover_replica=pulumi.get(__ret__, 'failover_replica'),\n gce_zone=pulumi.get(__ret__, 'gce_zone'),\n instance_type=pulumi.get(__ret__, 'instance_type'),\n ip_addresses=pulumi.get(__ret__, 'ip_addresses'),\n ipv6_address=pulumi.get(__ret__, 'ipv6_address'),\n kind=pulumi.get(__ret__, 'kind'),\n maintenance_version=pulumi.get(__ret__, 'maintenance_version'),\n master_instance_name=pulumi.get(__ret__, 'master_instance_name'),\n max_disk_size=pulumi.get(__ret__, 'max_disk_size'),\n name=pulumi.get(__ret__, 'name'),\n on_premises_configuration=pulumi.get(__ret__, 'on_premises_configuration'),\n out_of_disk_report=pulumi.get(__ret__, 'out_of_disk_report'),\n project=pulumi.get(__ret__, 'project'),\n region=pulumi.get(__ret__, 'region'),\n replica_configuration=pulumi.get(__ret__, 'replica_configuration'),\n replica_names=pulumi.get(__ret__, 'replica_names'),\n root_password=pulumi.get(__ret__, 'root_password'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n scheduled_maintenance=pulumi.get(__ret__, 'scheduled_maintenance'),\n secondary_gce_zone=pulumi.get(__ret__, 'secondary_gce_zone'),\n self_link=pulumi.get(__ret__, 'self_link'),\n server_ca_cert=pulumi.get(__ret__, 'server_ca_cert'),\n service_account_email_address=pulumi.get(__ret__, 'service_account_email_address'),\n settings=pulumi.get(__ret__, 'settings'),\n state=pulumi.get(__ret__, 'state'),\n suspension_reason=pulumi.get(__ret__, 'suspension_reason'))",
"def power_on(self, context, instance, network_info, block_device_info):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.start_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def stop_instance(InstanceId=None, Force=None):\n pass",
"def toggle_server(self):\n name = request.params.get('name', g.DEFAULT_SERVER)\n log.debug('toggle_server(%s)' % name)\n servers = model.Session.query(model.Server)\n server = servers.filter(model.Server.name == name).one()\n server.server_on = not server.server_on\n model.Session.update(server)\n model.Session.commit()\n redirect_to('/admin/dashboard')",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def _is_instance_running(settings, instance_id_ip,\n ip_given=False):\n instance = get_this_instance(settings,\n instance_id_ip, ip_given)\n if instance:\n if ip_given:\n ip_address = instance_id_ip\n else:\n ip_address = get_instance_ip(instance)\n state = instance.state\n print 'Current status of Instance'\\\n ' with IP [%s]: %s' %(ip_address, state)\n if state == \"running\" and ip_address:\n return True\n return False",
"def assign_instance(InstanceId=None, LayerIds=None):\n pass",
"def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))",
"def bounce_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\")\n else:\n cmd = _traffic_line(\"-B\")\n\n return _subprocess(cmd)",
"def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")",
"def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()",
"def start_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Start an instance\n response = ec2_resource.Instance(instance_id).start(DryRun=False)\n print(response)\n print(\"\\nSuccessfully starting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def RunStop(self, zone=None):\n if zone is None:\n zone = self.zone\n try:\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n except Exception, e:\n self.RegisterImage()\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n if not self.volume_1:\n self.volume_1 = self.tester.ec2.create_volume(zone=self.zone, size=2)\n if not self.volume_2:\n self.volume_2 = self.tester.ec2.create_volume(zone=self.zone, size=1)\n\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.reservation = self.tester.ec2.run_image(**self.run_instance_params)\n ## Ensure that we can attach and use a volume\n for instance in self.reservation.instances:\n instance.attach_volume(self.volume_1, self.test_volume_1_path)\n instance.attach_volume(self.volume_2, self.test_volume_2_path)\n self.tester.ec2.stop_instances(self.reservation)\n for instance in self.reservation.instances:\n if instance.ip_address or instance.private_ip_address:\n raise Exception(\"Instance had a public \" + str(instance.ip_address) + \" private \" + str(instance.private_ip_address) )\n if instance.block_device_mapping[self.test_volume_1_path] is None:\n raise Exception(\"DBM path is invalid\")\n if self.volume_1.id != instance.block_device_mapping[self.test_volume_1_path].volume_id:\n raise Exception(\"Volume id does not match\")",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def test_least_busy_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, s_ref['id'])\n db.instance_destroy(self.context, s_ref2['id'])",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise",
"def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)",
"def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))",
"def view_instance(self, instance_name, check=True):\n self.page_instances().table_instances.row(\n name=instance_name).link_instance.click()\n\n if check:\n assert self.app.page_instance.info_instance.label_name.value \\\n == instance_name",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")",
"def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()",
"def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]",
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()",
"def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)",
"def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance",
"def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")",
"def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"",
"def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)",
"def start_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n print(\"\\n===Creating EC2 instance.\")\n ec2_client.start_instances(InstanceIds=instances_ids)\n \n # wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"===EC2 instance is ready!\")",
"def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id",
"def nfvi_cold_migrate_instance(instance_uuid, callback, to_host_name=None,\n context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n return cmd_id",
"def _ensureComponentRunning(self, shouldBeRunning):\n for instance in shouldBeRunning:\n self.log.info(\"Starting instance %s\" % instance)\n system, name = instance.split('__')\n if self.controlComponents:\n res = self.sysAdminClient.startComponent(system, name)\n if not res['OK']:\n self.logError(\"Failed to start component:\", \"%s: %s\" % (instance, res['Message']))\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance was down, started instance\"\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance is down, should be started\"",
"def test_launch_volume_as_instance(self, volume, instances_steps,\n volumes_steps):\n instance_name = next(generate_ids('instance'))\n volumes_steps.launch_volume_as_instance(\n volume.name, instance_name, network_name=INTERNAL_NETWORK_NAME)\n\n instances_steps.page_instances().table_instances.row(\n name=instance_name).wait_for_status('Active')\n instances_steps.delete_instance(instance_name)",
"def test_run_terminate_no_image(self):\n params = {'image_ref': ''}\n instance = self._create_fake_instance_obj(params)\n self.compute.build_and_run_instance(self.context, instance, {}, {}, {},\n [], block_device_mapping=[])\n self._assert_state({'vm_state': vm_states.ACTIVE,\n 'task_state': None})\n\n self.compute.terminate_instance(self.context, instance, [])\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 0)",
"def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))",
"def power_off(self, instance, node=None):\n if not node:\n node = _get_baremetal_node_by_instance_uuid(instance['uuid'])\n pm = get_power_manager(node=node, instance=instance)\n pm.deactivate_node()\n if pm.state != baremetal_states.DELETED:\n raise exception.InstancePowerOffFailure(_(\n \"Baremetal power manager failed to stop node \"\n \"for instance %r\") % instance['uuid'])\n pm.stop_console()",
"def terminateInstance(region,zone,instance_id):\n\ttry:\n\t\tec2 = boto.ec2.connect_to_region(region+'-'+zone)\n\t\tec2.terminate_instances(instance_ids=[instance_id])\n\t\treturn True\n\texcept Exception as e:\n\t\tlogError(e)\n\t\treturn False",
"def lock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_lock.click()\n\n if check:\n self.close_notification('success')",
"def create_instance(self, nova, image_name, instance_name, flavor):\n image = nova.images.find(name=image_name)\n flavor = nova.flavors.find(name=flavor)\n instance = nova.servers.create(name=instance_name, image=image,\n flavor=flavor)\n\n count = 1\n status = instance.status\n while status != 'ACTIVE' and count < 60:\n time.sleep(3)\n instance = nova.servers.get(instance.id)\n status = instance.status\n self.log.debug('instance status: {}'.format(status))\n count += 1\n\n if status != 'ACTIVE':\n self.log.error('instance creation timed out')\n return None\n\n return instance",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2"
] |
[
"0.6103962",
"0.6098033",
"0.6030681",
"0.5840055",
"0.5827348",
"0.5759573",
"0.573131",
"0.5658811",
"0.56404024",
"0.5575826",
"0.5560752",
"0.55465156",
"0.55443525",
"0.55416125",
"0.5493635",
"0.547756",
"0.5457497",
"0.5455699",
"0.544469",
"0.5434933",
"0.5429983",
"0.54267335",
"0.539365",
"0.5393433",
"0.53797615",
"0.537584",
"0.5370196",
"0.5355971",
"0.5355636",
"0.5334652",
"0.5321105",
"0.5316858",
"0.53083724",
"0.5305453",
"0.5304866",
"0.5292876",
"0.52757645",
"0.5271434",
"0.52662337",
"0.5265017",
"0.52577025",
"0.5243351",
"0.5243275",
"0.52262485",
"0.52257377",
"0.521436",
"0.51886684",
"0.5188107",
"0.51816326",
"0.5180665",
"0.5177608",
"0.516502",
"0.5158531",
"0.5154697",
"0.5143252",
"0.5142074",
"0.51383775",
"0.5138149",
"0.513181",
"0.51279134",
"0.51215106",
"0.51203763",
"0.511938",
"0.511757",
"0.51006293",
"0.50997144",
"0.5099401",
"0.50981337",
"0.5079729",
"0.5069888",
"0.50665575",
"0.50640047",
"0.50559145",
"0.50556356",
"0.5048151",
"0.50374115",
"0.5037005",
"0.5029855",
"0.5026477",
"0.50167537",
"0.5009695",
"0.5008375",
"0.50083745",
"0.50069326",
"0.49938774",
"0.49855012",
"0.49715322",
"0.4967935",
"0.49652657",
"0.49644086",
"0.4954581",
"0.49541116",
"0.49508214",
"0.49400347",
"0.4938206",
"0.4933356",
"0.49329063",
"0.49293455",
"0.49261558",
"0.49167046",
"0.49086618"
] |
0.0
|
-1
|
The instance must be running when you call this operation. This operation is applicable to replica set instances and sharded cluster instances, but cannot be performed on standalone instances. On replica set instances, the switch is performed between instances. On sharded cluster instances, the switch is performed between shards.
|
def switch_dbinstance_ha(
self,
request: dds_20151201_models.SwitchDBInstanceHARequest,
) -> dds_20151201_models.SwitchDBInstanceHAResponse:
runtime = util_models.RuntimeOptions()
return self.switch_dbinstance_hawith_options(request, runtime)
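For context, a minimal usage sketch of the wrapper above. It assumes the SDK's generated Client and Config classes from the alibabacloud packages, placeholder credentials, a placeholder instance ID, and that the request field is named dbinstance_id following the naming used by the other DDS request models; those details are assumptions, not values taken from this dataset.

# Illustrative sketch only; credentials, region, and instance ID below are placeholders.
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models

# Build a client; region/endpoint resolution is assumed to be handled by the SDK.
config = open_api_models.Config(
    access_key_id='<access-key-id>',
    access_key_secret='<access-key-secret>',
    region_id='cn-hangzhou',
)
client = DdsClient(config)

# SwitchDBInstanceHARequest is the request model referenced in the wrapper above.
request = dds_20151201_models.SwitchDBInstanceHARequest(
    dbinstance_id='dds-bpxxxxxxxxxxxxxx',  # placeholder DBInstanceId
)
response = client.switch_dbinstance_ha(request)
print(response.body)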
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reboot_instance(InstanceId=None):\n pass",
"def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'",
"def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id",
"def start_instance(InstanceId=None):\n pass",
"def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')",
"def test_specific_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n compute1 = self.start_service('compute', host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='1')\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)",
"def lock_instance(vm_id):\n cli.openstack(cmd='server lock', positional_args=vm_id)",
"def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)",
"def test_lock_instance(self, instance, instances_steps):\n instances_steps.lock_instance(instance.name)\n instances_steps.unlock_instance(instance.name)",
"def _maybe_restart_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Compute service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n # Do nothing if the status is not RUNNING to avoid race. This will cover\n # most of the cases.\n if status == COMPUTE_STATUS_RUNNING:\n logging.info('Stopping GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().stop(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n enqueue_start_task(instance, zone)",
"def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance",
"def __switchToPrimary(self, datadir):\n cmd = ClusterCommand.getSwitchOverCmd(self.user, self.dbNodeInfo.id, datadir)\n self.logger.debug(\"Switch to primary: %s\" % cmd)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Switch instance to be primary failed!Datadir %s.\\nOutput: %s\" % (datadir, output))",
"def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])",
"def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)",
"def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def start_notebook_instance(NotebookInstanceName=None):\n pass",
"def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass",
"def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)",
"def SSHToInstance(self, args, instance):\n args = self._DefaultArgsForSSH(args)\n\n external_nat = ssh_utils.GetExternalIPAddress(instance)\n log.status.Print(\n 'Trying to SSH to VM with NAT IP:{}'.format(external_nat))\n args.ssh_key_file = ssh.Keys.DEFAULT_KEY_FILE\n\n ssh_helper = ssh_utils.BaseSSHCLIHelper()\n ssh_helper.Run(args)\n identity_file = ssh_helper.keys.key_file\n\n user, _ = ssh_utils.GetUserAndInstance(args.name)\n host_keys = self._GetHostKeyFromInstance(args.zone, ssh_helper, instance)\n options = self._GetSSHOptions(args.name, ssh_helper,\n instance, host_keys)\n\n public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)\n oslogin_state = ssh.GetOsloginState(\n instance,\n ssh_helper.GetProject(\n self.client, properties.VALUES.core.project.Get(required=True)),\n user,\n public_key,\n None,\n self.release_track,\n username_requested=False,\n messages=self.client.messages)\n user = oslogin_state.user\n\n remote = ssh.Remote(external_nat, user)\n if not oslogin_state.oslogin_enabled:\n self._WaitForSSHKeysToPropagate(ssh_helper, remote, identity_file, user,\n instance, options)\n\n extra_flags = []\n # Ctpu seems to be forwarding some other ports on what\n # seems like the TPU node. Need to understand better before enabling.\n if args.forward_ports:\n extra_flags.extend(\n ['-A', '-L', '6006:localhost:6006', '-L', '8888:localhost:8888'])\n ssh_cmd_args = {\n 'remote': remote,\n 'identity_file': identity_file,\n 'options': options,\n 'extra_flags': extra_flags\n }\n\n cmd = ssh.SSHCommand(**ssh_cmd_args)\n max_attempts = 10\n sleep_interval = 30\n # Since the instance was just created, it can take a while for the instance\n # to be ready to accept ssh connections, therefore retry up to 5m. Doesn't\n # need to be backed off, regular interval retry is sufficient since we\n # aren't looking to throttle.\n for i in range(max_attempts):\n try:\n log.status.Print('SSH Attempt #{}...'.format(i))\n # Errors from SSH itself result in an ssh.CommandError being raised\n return_code = cmd.Run(\n ssh_helper.env,\n putty_force_connect=properties.VALUES.ssh.putty_force_connect.GetBool())\n if return_code:\n # This is the return code of the remote command.\n # Problems with SSH itself will result in ssh.CommandError\n # being raised above.\n sys.exit(return_code)\n except ssh.CommandError as e:\n if i == max_attempts - 1:\n raise e\n log.status.Print(\n 'Retrying: SSH command error: {}'.format(six.text_type(e)))\n time.sleep(sleep_interval)\n continue\n break",
"def spawn(self, context, instance,\n network_info=None, block_device_info=None):\n LOG.debug(\"spawn\")\n\n instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance[\"availability_zone\"])\n\n # update instances table\n bmm, reuse = self._select_machine(context, instance)\n instance[\"display_name\"] = bmm[\"name\"]\n instance[\"availability_zone\"] = instance_zone\n db.instance_update(context, \n instance[\"id\"], \n {\"display_name\": bmm[\"name\"],\n \"availability_zone\": instance_zone})\n if vlan_id:\n db.bmm_update(context, bmm[\"id\"], {\"availability_zone\": cluster_name, \n \"vlan_id\": vlan_id,\n \"service_ip\": None})\n \n if instance_zone == \"resource_pool\":\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n else: \n self._update_ofc(bmm, cluster_name)\n if bmm[\"instance_id\"]:\n db.instance_destroy(context, bmm[\"instance_id\"])\n\n if reuse:\n db.bmm_update(context, bmm[\"id\"], {\"status\": \"used\", \n \"instance_id\": instance[\"id\"]}) \n else:\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n \n if instance[\"key_data\"]:\n self._inject_key(bmm[\"pxe_ip\"], str(instance[\"key_data\"]))",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def power_on(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.start()\n self.instance_waiter.wait(instance, self.instance_waiter.RUNNING)\n return True",
"def test_specific_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='2')\n\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, instance_id2)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")",
"def set_instance(self, env, instance, modify_existing):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.set_instance()' % self.__class__.__name__)\n # TODO create or modify the instance\n raise pywbem.CIMError(pywbem.CIM_ERR_NOT_SUPPORTED) # Remove to implement\n return instance",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)",
"def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response",
"def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")",
"def test_update_instances_schedule_state(self):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def _maybe_start_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Unable to start Compute instance, service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n if status == COMPUTE_STATUS_TERMINATED:\n logging.info('Starting GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().start(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n\n if status != COMPUTE_STATUS_RUNNING:\n # If in an intermediate state: PROVISIONING, STAGING, STOPPING, requeue\n # the task to check back later. If in TERMINATED state, also requeue the\n # task since the start attempt may fail and we should retry.\n enqueue_start_task(instance, zone)",
"def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def test_view_instance(self, instance, instances_steps):\n instances_steps.view_instance(instance.name)",
"def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()",
"def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")",
"def get_instance(instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:\n __args__ = dict()\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:sqladmin/v1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value\n\n return AwaitableGetInstanceResult(\n available_maintenance_versions=pulumi.get(__ret__, 'available_maintenance_versions'),\n backend_type=pulumi.get(__ret__, 'backend_type'),\n connection_name=pulumi.get(__ret__, 'connection_name'),\n create_time=pulumi.get(__ret__, 'create_time'),\n current_disk_size=pulumi.get(__ret__, 'current_disk_size'),\n database_installed_version=pulumi.get(__ret__, 'database_installed_version'),\n database_version=pulumi.get(__ret__, 'database_version'),\n disk_encryption_configuration=pulumi.get(__ret__, 'disk_encryption_configuration'),\n disk_encryption_status=pulumi.get(__ret__, 'disk_encryption_status'),\n etag=pulumi.get(__ret__, 'etag'),\n failover_replica=pulumi.get(__ret__, 'failover_replica'),\n gce_zone=pulumi.get(__ret__, 'gce_zone'),\n instance_type=pulumi.get(__ret__, 'instance_type'),\n ip_addresses=pulumi.get(__ret__, 'ip_addresses'),\n ipv6_address=pulumi.get(__ret__, 'ipv6_address'),\n kind=pulumi.get(__ret__, 'kind'),\n maintenance_version=pulumi.get(__ret__, 'maintenance_version'),\n master_instance_name=pulumi.get(__ret__, 'master_instance_name'),\n max_disk_size=pulumi.get(__ret__, 'max_disk_size'),\n name=pulumi.get(__ret__, 'name'),\n on_premises_configuration=pulumi.get(__ret__, 'on_premises_configuration'),\n out_of_disk_report=pulumi.get(__ret__, 'out_of_disk_report'),\n project=pulumi.get(__ret__, 'project'),\n region=pulumi.get(__ret__, 'region'),\n replica_configuration=pulumi.get(__ret__, 'replica_configuration'),\n replica_names=pulumi.get(__ret__, 'replica_names'),\n root_password=pulumi.get(__ret__, 'root_password'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n scheduled_maintenance=pulumi.get(__ret__, 'scheduled_maintenance'),\n secondary_gce_zone=pulumi.get(__ret__, 'secondary_gce_zone'),\n self_link=pulumi.get(__ret__, 'self_link'),\n server_ca_cert=pulumi.get(__ret__, 'server_ca_cert'),\n service_account_email_address=pulumi.get(__ret__, 'service_account_email_address'),\n settings=pulumi.get(__ret__, 'settings'),\n state=pulumi.get(__ret__, 'state'),\n suspension_reason=pulumi.get(__ret__, 'suspension_reason'))",
"def power_on(self, context, instance, network_info, block_device_info):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.start_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def stop_instance(InstanceId=None, Force=None):\n pass",
"def toggle_server(self):\n name = request.params.get('name', g.DEFAULT_SERVER)\n log.debug('toggle_server(%s)' % name)\n servers = model.Session.query(model.Server)\n server = servers.filter(model.Server.name == name).one()\n server.server_on = not server.server_on\n model.Session.update(server)\n model.Session.commit()\n redirect_to('/admin/dashboard')",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')",
"def _is_instance_running(settings, instance_id_ip,\n ip_given=False):\n instance = get_this_instance(settings,\n instance_id_ip, ip_given)\n if instance:\n if ip_given:\n ip_address = instance_id_ip\n else:\n ip_address = get_instance_ip(instance)\n state = instance.state\n print 'Current status of Instance'\\\n ' with IP [%s]: %s' %(ip_address, state)\n if state == \"running\" and ip_address:\n return True\n return False",
"def assign_instance(InstanceId=None, LayerIds=None):\n pass",
"def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))",
"def bounce_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\")\n else:\n cmd = _traffic_line(\"-B\")\n\n return _subprocess(cmd)",
"def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")",
"def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()",
"def RunStop(self, zone=None):\n if zone is None:\n zone = self.zone\n try:\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n except Exception, e:\n self.RegisterImage()\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n if not self.volume_1:\n self.volume_1 = self.tester.ec2.create_volume(zone=self.zone, size=2)\n if not self.volume_2:\n self.volume_2 = self.tester.ec2.create_volume(zone=self.zone, size=1)\n\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.reservation = self.tester.ec2.run_image(**self.run_instance_params)\n ## Ensure that we can attach and use a volume\n for instance in self.reservation.instances:\n instance.attach_volume(self.volume_1, self.test_volume_1_path)\n instance.attach_volume(self.volume_2, self.test_volume_2_path)\n self.tester.ec2.stop_instances(self.reservation)\n for instance in self.reservation.instances:\n if instance.ip_address or instance.private_ip_address:\n raise Exception(\"Instance had a public \" + str(instance.ip_address) + \" private \" + str(instance.private_ip_address) )\n if instance.block_device_mapping[self.test_volume_1_path] is None:\n raise Exception(\"DBM path is invalid\")\n if self.volume_1.id != instance.block_device_mapping[self.test_volume_1_path].volume_id:\n raise Exception(\"Volume id does not match\")",
"def start_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Start an instance\n response = ec2_resource.Instance(instance_id).start(DryRun=False)\n print(response)\n print(\"\\nSuccessfully starting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def test_least_busy_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, s_ref['id'])\n db.instance_destroy(self.context, s_ref2['id'])",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise",
"def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)",
"def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))",
"def view_instance(self, instance_name, check=True):\n self.page_instances().table_instances.row(\n name=instance_name).link_instance.click()\n\n if check:\n assert self.app.page_instance.info_instance.label_name.value \\\n == instance_name",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")",
"def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()",
"def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]",
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()",
"def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)",
"def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance",
"def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")",
"def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"",
"def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)",
"def start_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n print(\"\\n===Creating EC2 instance.\")\n ec2_client.start_instances(InstanceIds=instances_ids)\n \n # wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"===EC2 instance is ready!\")",
"def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id",
"def nfvi_cold_migrate_instance(instance_uuid, callback, to_host_name=None,\n context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n return cmd_id",
"def _ensureComponentRunning(self, shouldBeRunning):\n for instance in shouldBeRunning:\n self.log.info(\"Starting instance %s\" % instance)\n system, name = instance.split('__')\n if self.controlComponents:\n res = self.sysAdminClient.startComponent(system, name)\n if not res['OK']:\n self.logError(\"Failed to start component:\", \"%s: %s\" % (instance, res['Message']))\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance was down, started instance\"\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance is down, should be started\"",
"def test_launch_volume_as_instance(self, volume, instances_steps,\n volumes_steps):\n instance_name = next(generate_ids('instance'))\n volumes_steps.launch_volume_as_instance(\n volume.name, instance_name, network_name=INTERNAL_NETWORK_NAME)\n\n instances_steps.page_instances().table_instances.row(\n name=instance_name).wait_for_status('Active')\n instances_steps.delete_instance(instance_name)",
"def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))",
"def test_run_terminate_no_image(self):\n params = {'image_ref': ''}\n instance = self._create_fake_instance_obj(params)\n self.compute.build_and_run_instance(self.context, instance, {}, {}, {},\n [], block_device_mapping=[])\n self._assert_state({'vm_state': vm_states.ACTIVE,\n 'task_state': None})\n\n self.compute.terminate_instance(self.context, instance, [])\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 0)",
"def power_off(self, instance, node=None):\n if not node:\n node = _get_baremetal_node_by_instance_uuid(instance['uuid'])\n pm = get_power_manager(node=node, instance=instance)\n pm.deactivate_node()\n if pm.state != baremetal_states.DELETED:\n raise exception.InstancePowerOffFailure(_(\n \"Baremetal power manager failed to stop node \"\n \"for instance %r\") % instance['uuid'])\n pm.stop_console()",
"def terminateInstance(region,zone,instance_id):\n\ttry:\n\t\tec2 = boto.ec2.connect_to_region(region+'-'+zone)\n\t\tec2.terminate_instances(instance_ids=[instance_id])\n\t\treturn True\n\texcept Exception as e:\n\t\tlogError(e)\n\t\treturn False",
"def lock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_lock.click()\n\n if check:\n self.close_notification('success')",
"def create_instance(self, nova, image_name, instance_name, flavor):\n image = nova.images.find(name=image_name)\n flavor = nova.flavors.find(name=flavor)\n instance = nova.servers.create(name=instance_name, image=image,\n flavor=flavor)\n\n count = 1\n status = instance.status\n while status != 'ACTIVE' and count < 60:\n time.sleep(3)\n instance = nova.servers.get(instance.id)\n status = instance.status\n self.log.debug('instance status: {}'.format(status))\n count += 1\n\n if status != 'ACTIVE':\n self.log.error('instance creation timed out')\n return None\n\n return instance",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2"
] |
[
"0.61040485",
"0.6096324",
"0.60282654",
"0.58379394",
"0.58264416",
"0.5758901",
"0.57290405",
"0.56599015",
"0.56398165",
"0.55757517",
"0.55595875",
"0.55456",
"0.5543997",
"0.5541205",
"0.5492753",
"0.5477153",
"0.54576474",
"0.54539895",
"0.54452765",
"0.5433373",
"0.5427934",
"0.54262674",
"0.5394101",
"0.53921956",
"0.53793555",
"0.5374121",
"0.53694385",
"0.5356032",
"0.5355845",
"0.5334702",
"0.531998",
"0.5316969",
"0.53069645",
"0.5305549",
"0.5303321",
"0.52937686",
"0.5276283",
"0.5269501",
"0.52649236",
"0.5262889",
"0.525724",
"0.52433527",
"0.52421004",
"0.52251714",
"0.5224862",
"0.5213376",
"0.5189451",
"0.5187047",
"0.51805925",
"0.5180383",
"0.51763123",
"0.51650786",
"0.51582605",
"0.51564556",
"0.5142721",
"0.5141481",
"0.513841",
"0.5136372",
"0.5131935",
"0.51271975",
"0.5120529",
"0.51199496",
"0.5117998",
"0.5117303",
"0.50995845",
"0.5099351",
"0.5097509",
"0.50974715",
"0.50796443",
"0.50686026",
"0.5065488",
"0.50626326",
"0.5055086",
"0.5055079",
"0.5047959",
"0.50378907",
"0.50366485",
"0.5028522",
"0.50255674",
"0.5016331",
"0.50097466",
"0.5007584",
"0.5006492",
"0.5005757",
"0.4992329",
"0.49847522",
"0.49704602",
"0.49665394",
"0.49637017",
"0.4962652",
"0.49552613",
"0.49530962",
"0.4949279",
"0.49383482",
"0.49379858",
"0.4935344",
"0.49324194",
"0.49296668",
"0.4925811",
"0.49148324",
"0.49076656"
] |
0.0
|
-1
|
The instance must be running when you call this operation. This operation is applicable to replica set instances and sharded cluster instances, but cannot be performed on standalone instances. On replica set instances, the switch is performed between instances. On sharded cluster instances, the switch is performed between shards.
|
async def switch_dbinstance_ha_async(
self,
request: dds_20151201_models.SwitchDBInstanceHARequest,
) -> dds_20151201_models.SwitchDBInstanceHAResponse:
runtime = util_models.RuntimeOptions()
return await self.switch_dbinstance_hawith_options_async(request, runtime)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reboot_instance(InstanceId=None):\n pass",
"def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'",
"def run_instance():\n ami_id = \"ami-04876f29fd3a5e8ba\" # AMI Id\n instance_type = \"t2.micro\" # Instance Type\n tag_specs = [\n {\n 'ResourceType': 'instance',\n 'Tags': [\n {\n 'Key': 'Name',\n 'Value': 'BoB10@ProductDev.sdk.8084'\n }\n ]\n }\n ]\n\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n # Run an instance\n\n instances = ec2_resource.create_instances(ImageId=ami_id, InstanceType=instance_type,\n MaxCount=1, MinCount=1, KeyName='BoB10@ProductDev.8084',\n TagSpecifications=tag_specs, \n SecurityGroupIds=['sg-06b757b4bb272d98f'],\n UserData=assemble_userdata().as_string())\n Instance_id = instances[0].id\n print('\\nInstance Id: ' + Instance_id)\n print('Image Id: ' + instances[0].image_id)\n print('Instance Type: ' + instances[0].instance_type)\n print('State: ' + instances[0].state['Name'])\n return Instance_id",
"def start_instance(InstanceId=None):\n pass",
"def stop_instance():\n send_line('stop instance')\n os.system(f'gcloud compute instances stop {os.uname()[1]} --zone us-east1-b')",
"def check_status():\n js = _get_jetstream_conn()\n i = js.compute.instances.get(session.attributes.get('instance_id'))\n if not i:\n return question(\"There was a problem. Please retry your command.\")\n\n status = i.state\n if session.attributes['status'] != status:\n msg = \"New instance status is {0}.\".format(status)\n if not session.attributes['public_ip'] and status == 'running':\n # Attach a floating IP to the instance\n fip = None\n fips = js.network.floating_ips()\n for ip in fips:\n if not ip.in_use():\n fip = ip\n if fip:\n i.add_floating_ip(fip.public_ip)\n session.attributes['public_ip'] = fip.public_ip\n else:\n msg = \"Instance status is {0}\".format(status)\n\n session.attributes['status'] = status\n\n if session.attributes['status'] != 'running':\n q = \"Would you like to check the status again?\"\n return question(msg + q).reprompt(q)\n else:\n card_content = 'Access your instance at http://{0}'.format(\n session.attributes.get('public_ip'))\n return statement(msg).simple_card(\n title=\"Instance {0} was launched.\".format(i.name),\n content=msg + card_content)",
"def instance_action():\n data = check_args(\n ('cloudProvider', 'apiKey', 'secretKey', 'instanceAction',\n 'instanceId')\n )\n job = jobs.instance_action.apply_async(args=(data,))\n return make_response(job_id=job.id)",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def testMigrateAnInstanceInAnInstanceGroup(self):\n ### create test resources\n instance_name_1 = 'end-to-end-test-instance-1'\n instance_selfLink_1 = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name_1,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n unmanaged_instance_group_name = 'end-to-end-test-unmanaged-instance-group-1'\n self.test_resource_creator.create_unmanaged_instance_group(\n unmanaged_instance_group_name,\n [instance_name_1])\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute,\n instance_selfLink_1,\n self.test_resource_creator.network_name,\n self.test_resource_creator.subnetwork_name,\n False)\n\n with self.assertRaises(AmbiguousTargetResource):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n ### check migration result\n # the migration never starts, so the config didn't change\n new_config = self.google_api_interface.get_instance_configs(\n instance_name_1)\n self.assertEqual(new_config, original_config)\n print('Pass the current test')",
"def test_specific_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n compute1 = self.start_service('compute', host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='1')\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def test_restart_service_should_return_active(self):\n instance_info.dbaas.instances.restart(instance_info.id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)\n\n def result_is_active():\n instance = instance_info.dbaas.instances.get(\n instance_info.id)\n if instance.status in CONFIG.running_status:\n return True\n else:\n assert_equal(\"REBOOT\", instance.status)\n return False\n poll_until(result_is_active)",
"def lock_instance(vm_id):\n cli.openstack(cmd='server lock', positional_args=vm_id)",
"def launch_on_jetstream():\n launched = launch_instance(\"Jetstream\")\n session.attributes['instance_id'] = launched.id\n session.attributes['public_ip'] = None\n session.attributes['status'] = None\n\n msg = \"An instance is starting. Would you like to check its status?\"\n return question(msg)",
"def test_lock_instance(self, instance, instances_steps):\n instances_steps.lock_instance(instance.name)\n instances_steps.unlock_instance(instance.name)",
"def _maybe_restart_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Compute service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n # Do nothing if the status is not RUNNING to avoid race. This will cover\n # most of the cases.\n if status == COMPUTE_STATUS_RUNNING:\n logging.info('Stopping GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().stop(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n enqueue_start_task(instance, zone)",
"def launch_instance_nonvpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n security_group,\n machine_type = 'm1.small',\n user_data = None,\n wait_for_running = True ) :\n instance_r = ami.run( key_name = keypair,\n instance_type = machine_type,\n security_groups = [ security_group ],\n user_data = user_data )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n if wait_for_running :\n running = wait_on_object_state( instance, 'running', failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n return instance",
"def __switchToPrimary(self, datadir):\n cmd = ClusterCommand.getSwitchOverCmd(self.user, self.dbNodeInfo.id, datadir)\n self.logger.debug(\"Switch to primary: %s\" % cmd)\n (status, output) = commands.getstatusoutput(cmd)\n if (status != 0):\n self.logger.logExit(\"Switch instance to be primary failed!Datadir %s.\\nOutput: %s\" % (datadir, output))",
"def test_regular_user_can_schedule(self):\n\n s_ref = self._create_compute_service(host='host1')\n instance_id = self._create_instance()\n ctxt = context.RequestContext('fake', 'fake', False)\n self.scheduler.driver.schedule_run_instance(ctxt, instance_id)\n db.instance_destroy(self.context, s_ref['id'])",
"def test_instance_running(self) -> None:\n if self.prod_env:\n ec2_name = 'saints-xctf-server-prod-asg'\n else:\n ec2_name = 'saints-xctf-server-dev-asg'\n\n instances = self.get_ec2(ec2_name)\n self.assertTrue(len(instances) > 0)",
"def stop_instance(self):\n instance_id = self._choose_among_running_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Stopping the instance \"%s\"' % instance_id\n self.compute.stop_instance(instance_id)\n print 'The instance has been stopped'",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def active():\n if env.get('active_instance'):\n print \"Active Instance: \" + env.get('active_instance')\n else:\n print \"No active instance\"",
"def start_notebook_instance(NotebookInstanceName=None):\n pass",
"def modifyMastersWithMultipleInstances(self):\n # Nothing to do\n pass",
"def run(self):\n client = self._get_client()\n metadata = self._metadata\n\n if str(metadata.get('cluster')) == str(self._cluster_id):\n if str(self._uuid) == str(self._node_id):\n #hypervisor_hostname = client.servers.find(\n # id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']\n hypervisor_hostname = client.servers.find(\n id=self._uuid).to_dict()['OS-EXT-SRV-ATTR:host']\n data = {\n 'migrate': True,\n 'uuid': self._uuid,\n 'hypervisor_hostname': hypervisor_hostname,\n 'flavor_id': self._flavor\n }\n return Result(data)\n\n data = {\n 'migrate': False,\n 'uuid': self._uuid,\n 'hypervisor_hostname': '',\n 'flavor_id':self._flavor\n }\n return Result(data)",
"def SSHToInstance(self, args, instance):\n args = self._DefaultArgsForSSH(args)\n\n external_nat = ssh_utils.GetExternalIPAddress(instance)\n log.status.Print(\n 'Trying to SSH to VM with NAT IP:{}'.format(external_nat))\n args.ssh_key_file = ssh.Keys.DEFAULT_KEY_FILE\n\n ssh_helper = ssh_utils.BaseSSHCLIHelper()\n ssh_helper.Run(args)\n identity_file = ssh_helper.keys.key_file\n\n user, _ = ssh_utils.GetUserAndInstance(args.name)\n host_keys = self._GetHostKeyFromInstance(args.zone, ssh_helper, instance)\n options = self._GetSSHOptions(args.name, ssh_helper,\n instance, host_keys)\n\n public_key = ssh_helper.keys.GetPublicKey().ToEntry(include_comment=True)\n oslogin_state = ssh.GetOsloginState(\n instance,\n ssh_helper.GetProject(\n self.client, properties.VALUES.core.project.Get(required=True)),\n user,\n public_key,\n None,\n self.release_track,\n username_requested=False,\n messages=self.client.messages)\n user = oslogin_state.user\n\n remote = ssh.Remote(external_nat, user)\n if not oslogin_state.oslogin_enabled:\n self._WaitForSSHKeysToPropagate(ssh_helper, remote, identity_file, user,\n instance, options)\n\n extra_flags = []\n # Ctpu seems to be forwarding some other ports on what\n # seems like the TPU node. Need to understand better before enabling.\n if args.forward_ports:\n extra_flags.extend(\n ['-A', '-L', '6006:localhost:6006', '-L', '8888:localhost:8888'])\n ssh_cmd_args = {\n 'remote': remote,\n 'identity_file': identity_file,\n 'options': options,\n 'extra_flags': extra_flags\n }\n\n cmd = ssh.SSHCommand(**ssh_cmd_args)\n max_attempts = 10\n sleep_interval = 30\n # Since the instance was just created, it can take a while for the instance\n # to be ready to accept ssh connections, therefore retry up to 5m. Doesn't\n # need to be backed off, regular interval retry is sufficient since we\n # aren't looking to throttle.\n for i in range(max_attempts):\n try:\n log.status.Print('SSH Attempt #{}...'.format(i))\n # Errors from SSH itself result in an ssh.CommandError being raised\n return_code = cmd.Run(\n ssh_helper.env,\n putty_force_connect=properties.VALUES.ssh.putty_force_connect.GetBool())\n if return_code:\n # This is the return code of the remote command.\n # Problems with SSH itself will result in ssh.CommandError\n # being raised above.\n sys.exit(return_code)\n except ssh.CommandError as e:\n if i == max_attempts - 1:\n raise e\n log.status.Print(\n 'Retrying: SSH command error: {}'.format(six.text_type(e)))\n time.sleep(sleep_interval)\n continue\n break",
"def spawn(self, context, instance,\n network_info=None, block_device_info=None):\n LOG.debug(\"spawn\")\n\n instance_zone, cluster_name, vlan_id, create_cluster = self._parse_zone(instance[\"availability_zone\"])\n\n # update instances table\n bmm, reuse = self._select_machine(context, instance)\n instance[\"display_name\"] = bmm[\"name\"]\n instance[\"availability_zone\"] = instance_zone\n db.instance_update(context, \n instance[\"id\"], \n {\"display_name\": bmm[\"name\"],\n \"availability_zone\": instance_zone})\n if vlan_id:\n db.bmm_update(context, bmm[\"id\"], {\"availability_zone\": cluster_name, \n \"vlan_id\": vlan_id,\n \"service_ip\": None})\n \n if instance_zone == \"resource_pool\":\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n else: \n self._update_ofc(bmm, cluster_name)\n if bmm[\"instance_id\"]:\n db.instance_destroy(context, bmm[\"instance_id\"])\n\n if reuse:\n db.bmm_update(context, bmm[\"id\"], {\"status\": \"used\", \n \"instance_id\": instance[\"id\"]}) \n else:\n self._install_machine(context, instance, bmm, cluster_name, vlan_id)\n \n if instance[\"key_data\"]:\n self._inject_key(bmm[\"pxe_ip\"], str(instance[\"key_data\"]))",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def test_least_busy_host_gets_instance(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def power_on(self, ec2_session, ami_id):\n instance = self.aws_api.get_instance_by_id(ec2_session, ami_id)\n instance.start()\n self.instance_waiter.wait(instance, self.instance_waiter.RUNNING)\n return True",
"def test_specific_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1', memory_mb='2')\n\n instance_id2 = self._create_instance(availability_zone='nova:host1')\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual('host1', host)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, instance_id2)\n db.service_destroy(self.context, s_ref['id'])\n db.service_destroy(self.context, s_ref2['id'])",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def main(self, cmd_args):\n reservations = self._ec2_client.describe_instances(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]},\n {'Name': \"tag:Name\", 'Values': [cmd_args.name]}\n ])['Reservations']\n if not reservations:\n halt.err(\"No instances matching given parameters found.\")\n instance_state = reservations[0]['Instances'][0]['State']['Name']\n if instance_state in (\"shutting-down\", \"terminated\"):\n halt.err(\"Instance has already been terminated.\")\n\n addresses = self._ec2_client.describe_addresses(Filters=[\n {'Name': \"instance-id\", 'Values': [cmd_args.id]}\n ])['Addresses']\n print(\"\")\n if addresses:\n self._disassociate_addresses(addresses, cmd_args.save_ips)\n elif cmd_args.save_ips is True:\n print(\"No elastic IPs associated with instance.\")\n\n self._ec2_client.terminate_instances(InstanceIds=[cmd_args.id])\n print(\"Instance termination process started.\")",
"def set_instance(self, env, instance, modify_existing):\n\n logger = env.get_logger()\n logger.log_debug('Entering %s.set_instance()' % self.__class__.__name__)\n # TODO create or modify the instance\n raise pywbem.CIMError(pywbem.CIM_ERR_NOT_SUPPORTED) # Remove to implement\n return instance",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)",
"def access_spot_instance_state() -> Response:\n retry_session = requests_retry_session()\n response = retry_session.get(INSTANCE_ACTION_URL)\n return response",
"def run():\r\n if args.action == \"create\":\r\n if args.customer_id and args.node_type:\r\n my_aws = createaws()\r\n node_id = my_aws.create_ec2_instance(args.customer_id, args.node_type)\r\n print(\"Node Created: \", node_id, \"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Creation\")\r\n return False\r\n\r\n elif args.action == \"list-nodes\":\r\n if args.customer_id:\r\n my_aws = createaws()\r\n instance_lst = my_aws.get_instance_by_customer_id(args.customer_id)\r\n print(\"Customer\", args.customer_id, \"has \" + str(len(instance_lst)) + \" Instances: \", \",\".join(instance_lst)\r\n ,\"\\n\")\r\n return True\r\n else:\r\n print(\"Missed command parameters for Instance Listing\")\r\n return False\r\n\r\n elif args.action == \"list-all\":\r\n my_aws = createaws()\r\n cust_inst_ip = my_aws.get_all_instances()\r\n print(\"All the Instances: customer_id, instance_id, instance_ip formatted\\n\")\r\n if len(cust_inst_ip) > 0:\r\n for rec in cust_inst_ip:\r\n print(', '.join(rec))\r\n else:\r\n print(\"No Instances!\")\r\n return False\r\n return True\r\n\r\n elif args.action == \"execute\":\r\n instance_ids, succ_id_list, not_worked_is_list, outs = [], [], [], []\r\n if args.script and (args.customer_id or args.node_type):\r\n my_aws = createaws()\r\n commands = args.script\r\n if args.customer_id:\r\n instance_ids.extend(my_aws.get_instance_by_customer_id(args.customer_id))\r\n if args.node_type:\r\n instance_ids.extend(my_aws.get_instance_by_node_type(args.node_type))\r\n instance_ids = list(set(instance_ids))\r\n\r\n succ_id_list, not_worked_is_list, outs = \\\r\n my_aws.execute_commands_on_linux_instances(commands, instance_ids)\r\n print(\"\\nInstances that run the commands:\\n\", '\\n '.join(succ_id_list))\r\n print(\"\\nInstances that don't run the commands: (Instance is not running or its SSM agent doesn't work or \"\r\n \"command couldn't be executed\\n\", '\\n '.join(not_worked_is_list))\r\n print(\"\\nOutputs of the Instances that run the commands:\")\r\n for i in outs:\r\n print(\"\\n\")\r\n for k, v in dict(i).items():\r\n print(str(k).lstrip(), \"-->\", str(v).replace('\\n', \"\"))\r\n return True\r\n else:\r\n print(\"Missed command parameters for Execution on Instance\")\r\n return False\r\n\r\n elif args.action == \"backup\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n s_id = my_aws.make_backup(args.node_id)\r\n print(s_id)\r\n else:\r\n return False\r\n\r\n elif args.action == \"list-backups\":\r\n if args.node_id:\r\n my_aws = createaws()\r\n backup_list = my_aws.list_backups(args.node_id)\r\n if len(backup_list) > 0:\r\n for rec in backup_list:\r\n print(', '.join(rec))\r\n return True\r\n else:\r\n print(\"Snapshot yok !\")\r\n return True\r\n else:\r\n return False\r\n\r\n elif args.action == \"roll-back\":\r\n if args.backup_id:\r\n my_aws = createaws()\r\n my_aws.roll_back(args.backup_id, args.node_id)\r\n elif args.action == \"terminate-all\":\r\n my_aws = createaws()\r\n my_aws.terminate_instances('ALL')\r\n else:\r\n print(\"Please select a proper action\")",
"def test_update_instances_schedule_state(self):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def select_instance(state='running'):\n if env.get('active_instance', False):\n return\n\n list_aws_instances(state=state)\n\n prompt_text = \"Please select from the following instances:\\n\"\n instance_template = \" %(ct)d: %(state)s instance %(id)s\\n\"\n for idx, instance in enumerate(env.instances):\n ct = idx + 1\n args = {'ct': ct}\n args.update(instance)\n prompt_text += instance_template % args\n prompt_text += \"Choose an instance: \"\n\n def validation(input):\n choice = int(input)\n if not choice in range(1, len(env.instances) + 1):\n raise ValueError(\"%d is not a valid instance\" % choice)\n return choice\n\n choice = prompt(prompt_text, validate=validation)\n env.active_instance = env.instances[choice - 1]['instance']\n print env.active_instance",
"def _maybe_start_instance(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Unable to start Compute instance, service unavailable.')\n return\n\n status = self._compute_status(instance, zone)\n\n logging.info('GCE VM \\'%s (%s)\\' status: \\'%s\\'.',\n instance, zone, status)\n\n if status == COMPUTE_STATUS_TERMINATED:\n logging.info('Starting GCE VM: %s (%s)', instance, zone)\n self.compute_service.instances().start(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n\n if status != COMPUTE_STATUS_RUNNING:\n # If in an intermediate state: PROVISIONING, STAGING, STOPPING, requeue\n # the task to check back later. If in TERMINATED state, also requeue the\n # task since the start attempt may fail and we should retry.\n enqueue_start_task(instance, zone)",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def launch_instance(ec2, ami, itype, kp_name, sec_group_name):\n\n\n instance = ec2.run_instances(\n ami,\n key_name=kp_name,\n instance_type=itype,\n security_groups=[sec_group_name]\n ).instances[0]\n\n while instance.state != 'running':\n sys.stdout.write('Waiting for instance: {}, at DNS: {} to start\\n'.format(instance.id,\n str(instance.dns_name).split('.')[0]))\n time.sleep(5)\n instance.update()\n\n sys.stdout.write('\\nSuccess! EC2 Instance Launched \\nInstance_Type: {} in {}'.format(instance.instance_type,\n instance.placement))\n return instance",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def test_view_instance(self, instance, instances_steps):\n instances_steps.view_instance(instance.name)",
"def start_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Starting up a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"start\"])\n popdir()",
"def failover_replica(self) -> 'outputs.InstanceFailoverReplicaResponse':\n return pulumi.get(self, \"failover_replica\")",
"def get_instance(instance: Optional[str] = None,\n project: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:\n __args__ = dict()\n __args__['instance'] = instance\n __args__['project'] = project\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('google-native:sqladmin/v1:getInstance', __args__, opts=opts, typ=GetInstanceResult).value\n\n return AwaitableGetInstanceResult(\n available_maintenance_versions=pulumi.get(__ret__, 'available_maintenance_versions'),\n backend_type=pulumi.get(__ret__, 'backend_type'),\n connection_name=pulumi.get(__ret__, 'connection_name'),\n create_time=pulumi.get(__ret__, 'create_time'),\n current_disk_size=pulumi.get(__ret__, 'current_disk_size'),\n database_installed_version=pulumi.get(__ret__, 'database_installed_version'),\n database_version=pulumi.get(__ret__, 'database_version'),\n disk_encryption_configuration=pulumi.get(__ret__, 'disk_encryption_configuration'),\n disk_encryption_status=pulumi.get(__ret__, 'disk_encryption_status'),\n etag=pulumi.get(__ret__, 'etag'),\n failover_replica=pulumi.get(__ret__, 'failover_replica'),\n gce_zone=pulumi.get(__ret__, 'gce_zone'),\n instance_type=pulumi.get(__ret__, 'instance_type'),\n ip_addresses=pulumi.get(__ret__, 'ip_addresses'),\n ipv6_address=pulumi.get(__ret__, 'ipv6_address'),\n kind=pulumi.get(__ret__, 'kind'),\n maintenance_version=pulumi.get(__ret__, 'maintenance_version'),\n master_instance_name=pulumi.get(__ret__, 'master_instance_name'),\n max_disk_size=pulumi.get(__ret__, 'max_disk_size'),\n name=pulumi.get(__ret__, 'name'),\n on_premises_configuration=pulumi.get(__ret__, 'on_premises_configuration'),\n out_of_disk_report=pulumi.get(__ret__, 'out_of_disk_report'),\n project=pulumi.get(__ret__, 'project'),\n region=pulumi.get(__ret__, 'region'),\n replica_configuration=pulumi.get(__ret__, 'replica_configuration'),\n replica_names=pulumi.get(__ret__, 'replica_names'),\n root_password=pulumi.get(__ret__, 'root_password'),\n satisfies_pzs=pulumi.get(__ret__, 'satisfies_pzs'),\n scheduled_maintenance=pulumi.get(__ret__, 'scheduled_maintenance'),\n secondary_gce_zone=pulumi.get(__ret__, 'secondary_gce_zone'),\n self_link=pulumi.get(__ret__, 'self_link'),\n server_ca_cert=pulumi.get(__ret__, 'server_ca_cert'),\n service_account_email_address=pulumi.get(__ret__, 'service_account_email_address'),\n settings=pulumi.get(__ret__, 'settings'),\n state=pulumi.get(__ret__, 'state'),\n suspension_reason=pulumi.get(__ret__, 'suspension_reason'))",
"def power_on(self, context, instance, network_info, block_device_info):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.start_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def stop_instance(InstanceId=None, Force=None):\n pass",
"def toggle_server(self):\n name = request.params.get('name', g.DEFAULT_SERVER)\n log.debug('toggle_server(%s)' % name)\n servers = model.Session.query(model.Server)\n server = servers.filter(model.Server.name == name).one()\n server.server_on = not server.server_on\n model.Session.update(server)\n model.Session.commit()\n redirect_to('/admin/dashboard')",
"def test_snat_with_master_reboot(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.reboot(self.inputs.k8s_master_ip)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def test_snat_with_kubelet_restart_on_master(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = [self.inputs.k8s_master_ip])\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def create_instance(name, config, region, secrets, key_name, instance_data,\n deploypass):\n conn = connect_to_region(\n region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key']\n )\n vpc = VPCConnection(\n region=conn.region,\n aws_access_key_id=secrets['aws_access_key_id'],\n aws_secret_access_key=secrets['aws_secret_access_key'])\n\n # Make sure we don't request the same things twice\n token = str(uuid.uuid4())[:16]\n\n instance_data = instance_data.copy()\n instance_data['name'] = name\n instance_data['hostname'] = '{name}.{domain}'.format(\n name=name, domain=config['domain'])\n\n bdm = None\n if 'device_map' in config:\n bdm = BlockDeviceMapping()\n for device, device_info in config['device_map'].items():\n bdm[device] = BlockDeviceType(size=device_info['size'],\n delete_on_termination=True)\n\n ip_address = get_ip(instance_data['hostname'])\n subnet_id = None\n\n if ip_address:\n s_id = get_subnet_id(vpc, ip_address)\n if s_id in config['subnet_ids']:\n if ip_available(conn, ip_address):\n subnet_id = s_id\n else:\n log.warning(\"%s already assigned\" % ip_address)\n\n # TODO: fail if no IP assigned\n if not ip_address or not subnet_id:\n ip_address = None\n subnet_id = choice(config.get('subnet_ids'))\n\n while True:\n try:\n reservation = conn.run_instances(\n image_id=config['ami'],\n key_name=key_name,\n instance_type=config['instance_type'],\n block_device_map=bdm,\n client_token=token,\n subnet_id=subnet_id,\n private_ip_address=ip_address,\n disable_api_termination=bool(config.get('disable_api_termination')),\n security_group_ids=config.get('security_group_ids', []),\n )\n break\n except boto.exception.BotoServerError:\n log.exception(\"Cannot start an instance\")\n time.sleep(10)\n\n instance = reservation.instances[0]\n log.info(\"instance %s created, waiting to come up\", instance)\n # Wait for the instance to come up\n while True:\n try:\n instance.update()\n if instance.state == 'running':\n break\n except Exception:\n log.exception(\"hit error waiting for instance to come up\")\n time.sleep(10)\n\n instance.add_tag('Name', name)\n instance.add_tag('FQDN', instance_data['hostname'])\n instance.add_tag('created', time.strftime(\"%Y-%m-%d %H:%M:%S %Z\",\n time.gmtime()))\n instance.add_tag('moz-type', config['type'])\n\n log.info(\"assimilating %s\", instance)\n instance.add_tag('moz-state', 'pending')\n while True:\n try:\n assimilate(instance.private_ip_address, config, instance_data, deploypass)\n break\n except:\n log.exception(\"problem assimilating %s\", instance)\n time.sleep(10)\n instance.add_tag('moz-state', 'ready')",
"def _is_instance_running(settings, instance_id_ip,\n ip_given=False):\n instance = get_this_instance(settings,\n instance_id_ip, ip_given)\n if instance:\n if ip_given:\n ip_address = instance_id_ip\n else:\n ip_address = get_instance_ip(instance)\n state = instance.state\n print 'Current status of Instance'\\\n ' with IP [%s]: %s' %(ip_address, state)\n if state == \"running\" and ip_address:\n return True\n return False",
"def assign_instance(InstanceId=None, LayerIds=None):\n pass",
"def step_impl(context, instance_number):\n num_try = 60\n interval = 10\n success = False\n\n for i in range(num_try):\n time.sleep(interval)\n context.service_instances = context.service.status()['replicaStatus']\n if len(context.service_instances) == int(instance_number):\n success = True\n break\n context.dl.logger.debug(\"Step is running for {:.2f}[s] and now Going to sleep {:.2f}[s]\".format((i + 1) * interval,\n interval))\n\n assert success, \"TEST FAILED: Expected {}, Got {}\".format(instance_number, len(context.service_instances))",
"def bounce_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\")\n else:\n cmd = _traffic_line(\"-B\")\n\n return _subprocess(cmd)",
"def launch_instance_vpc ( ec2_conn,\n ami,\n base_name,\n instance_type,\n keypair,\n subnet_id,\n security_group_id,\n machine_type = 'm3.medium',\n user_data = None,\n wait_for_running = True,\n public_ip = False,\n static_ip_address = None,\n monitor_params = None ) :\n interfaces = None\n subnet = None\n security_group_ids = None\n \n if static_ip_address is None:\n spec = boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id = subnet_id,\n groups = [ security_group_id ],\n associate_public_ip_address = public_ip )\n interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection( spec )\n else:\n subnet = subnet_id\n security_group_ids = [security_group_id]\n\n instance_r = ec2_conn.run_instances( image_id = ami.id,\n key_name = keypair,\n instance_type = machine_type,\n monitoring_enabled = True,\n network_interfaces = interfaces,\n subnet_id = subnet, \n user_data = user_data,\n security_group_ids = security_group_ids,\n private_ip_address = static_ip_address )\n instance = instance_r.instances[ 0 ];\n aws_cmd( ec2_conn.create_tags,\n [ instance.id, { \"Name\": get_instance_name( base_name, instance_type ) } ] )\n \n print \"Waiting for instance to be ready\"\n \n if wait_for_running :\n running = wait_on_object_state( instance, 'running', max_wait = 600, failure_state = 'terminated' )\n if not running :\n print \"Deployment instance still not up after long period of time! Exiting...\"\n sys.exit( 3 )\n\n if monitor_params :\n print \"Adding monitoring to the instance.\"\n\n return instance",
"def test_snat_with_kubelet_restart_on_slave(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"kubelet\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(30) # Wait timer for all kubernetes pods to stablise.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)",
"def become_replica(ticket, initiated_by):\n assert model.is_standalone()\n\n # On dev appserver emulate X-Appengine-Inbound-Appid header.\n headers = {'Content-Type': 'application/octet-stream'}\n protocol = 'https'\n if utils.is_local_dev_server():\n headers['X-Appengine-Inbound-Appid'] = app_identity.get_application_id()\n protocol = 'http'\n headers['X-URLFetch-Service-Id'] = utils.get_urlfetch_service_id()\n\n # Pass back the ticket for primary to verify it, tell the primary to use\n # default version hostname to talk to us.\n link_request = replication_pb2.ServiceLinkRequest()\n link_request.ticket = ticket.ticket\n link_request.replica_url = (\n '%s://%s' % (protocol, app_identity.get_default_version_hostname()))\n link_request.initiated_by = initiated_by.to_bytes()\n\n # Primary will look at X-Appengine-Inbound-Appid and compare it to what's in\n # the ticket.\n try:\n result = urlfetch.fetch(\n url='%s/auth_service/api/v1/internal/link_replica' % ticket.primary_url,\n payload=link_request.SerializeToString(),\n method='POST',\n headers=headers,\n follow_redirects=False,\n deadline=30,\n validate_certificate=True)\n except urlfetch.Error as exc:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'URLFetch error (%s): %s' % (exc.__class__.__name__, exc))\n\n # Protobuf based protocol is not using HTTP codes (handler always replies with\n # HTTP 200, providing error details if needed in protobuf serialized body).\n # So any other status code here means there was a transport level error.\n if result.status_code != 200:\n raise ProtocolError(\n replication_pb2.ServiceLinkResponse.TRANSPORT_ERROR,\n 'Request to the primary failed with HTTP %d.' % result.status_code)\n\n link_response = replication_pb2.ServiceLinkResponse.FromString(result.content)\n if link_response.status != replication_pb2.ServiceLinkResponse.SUCCESS:\n message = LINKING_ERRORS.get(\n link_response.status,\n 'Request to the primary failed with status %d.' % link_response.status)\n raise ProtocolError(link_response.status, message)\n\n # Become replica. Auth DB will be overwritten on a first push from Primary.\n state = model.AuthReplicationState(\n key=model.replication_state_key(),\n primary_id=ticket.primary_id,\n primary_url=ticket.primary_url)\n state.put()",
"def replicaof(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"REPLICAOF is not supported in cluster mode\")",
"def start_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Start an instance\n response = ec2_resource.Instance(instance_id).start(DryRun=False)\n print(response)\n print(\"\\nSuccessfully starting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def RunStop(self, zone=None):\n if zone is None:\n zone = self.zone\n try:\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n except Exception, e:\n self.RegisterImage()\n self.run_instance_params['image'] = self.tester.ec2.get_emi(emi=self.args.emi,\n root_device_type=\"ebs\",\n basic_image=True)\n if not self.volume_1:\n self.volume_1 = self.tester.ec2.create_volume(zone=self.zone, size=2)\n if not self.volume_2:\n self.volume_2 = self.tester.ec2.create_volume(zone=self.zone, size=1)\n\n if self.reservation:\n self.tester.ec2.terminate_instances(self.reservation)\n self.reservation = self.tester.ec2.run_image(**self.run_instance_params)\n ## Ensure that we can attach and use a volume\n for instance in self.reservation.instances:\n instance.attach_volume(self.volume_1, self.test_volume_1_path)\n instance.attach_volume(self.volume_2, self.test_volume_2_path)\n self.tester.ec2.stop_instances(self.reservation)\n for instance in self.reservation.instances:\n if instance.ip_address or instance.private_ip_address:\n raise Exception(\"Instance had a public \" + str(instance.ip_address) + \" private \" + str(instance.private_ip_address) )\n if instance.block_device_mapping[self.test_volume_1_path] is None:\n raise Exception(\"DBM path is invalid\")\n if self.volume_1.id != instance.block_device_mapping[self.test_volume_1_path].volume_id:\n raise Exception(\"Volume id does not match\")",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def test_least_busy_host_gets_instance_no_queue(self):\n s_ref = self._create_compute_service(host='host1')\n s_ref2 = self._create_compute_service(host='host2')\n instance_id1 = self._create_instance(host='host1')\n instance_id2 = self._create_instance()\n host = self.scheduler.driver.schedule_run_instance(self.context,\n instance_id2)\n self.assertEqual(host, 'host2')\n db.instance_destroy(self.context, instance_id2)\n db.instance_destroy(self.context, instance_id1)\n db.instance_destroy(self.context, s_ref['id'])\n db.instance_destroy(self.context, s_ref2['id'])",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def run_cluster(autoscaling: bool = False, **options) -> None:\n if autoscaling:\n thread = AutoScalingCluster.new(**options)\n else:\n thread = RemoteCluster.new(**options)\n try:\n thread.join()\n except Exception:\n thread.stop()\n raise",
"def test_live_migration_src_check_instance_not_running(self):\n\n instance_id = self._create_instance(power_state=power_state.NOSTATE)\n i_ref = db.instance_get(self.context, instance_id)\n\n try:\n self.scheduler.driver._live_migration_src_check(self.context,\n i_ref)\n except exception.Invalid, e:\n c = (e.message.find('is not running') > 0)\n\n self.assertTrue(c)\n db.instance_destroy(self.context, instance_id)",
"def deploy_instance(self, pool):\n\n if vlb_db.get_vlb_from_pool_id(pool['pool']['id']) is not None:\n LOG.debug('This is an error')\n return\n name = 'vlb_{0}'.format(os.urandom(6).encode('hex'))\n nova_client = self._get_nova_client()\n neutron_client = self._get_neutron_client()\n\n subnet = neutron_client.show_subnet(pool['pool']['subnet_id'])\n\n LOG.debug('brocade_vlb_driver::deploy_instance %s' % name)\n vLb = nova_client.servers.create(name, self.conf.brocade_vlb.image_id,\n self.conf.brocade_vlb.flavor_id,\n nics=[ {'net-id': self.conf.brocade_vlb.management_network_id },\n {'net-id': subnet['subnet']['network_id'] }]\n )\n\n def _vLb_active():\n while True:\n try:\n instance = nova_client.servers.get(vLb.id)\n except Exception:\n yield self.conf.brocade_vlb.nova_poll_interval\n continue\n LOG.info(_(\"vLB Driver::Load Balancer instance status: %s\")\n %instance.status)\n if instance.status not in ('ACTIVE', 'ERROR'):\n yield self.conf.brocade_vlb.nova_poll_interval\n elif instance.status == 'ERROR':\n raise InstanceSpawnError()\n else:\n break\n self._wait(_vLb_active, \n timeout=self.conf.brocade_vlb.nova_spawn_timeout)\n LOG.info(_(\"vLB Driver::Waiting for the vLB app to initialize %s\") %\n vLb.id)\n\n mgmt_ip = self._get_address(vLb,\n self.conf.brocade_vlb.management_network_id)\n data_ip = self._get_address(vLb, subnet['subnet']['network_id'])\n vlb_db.create_vlb(pool['pool']['id'], vLb.id, vLb.tenant_id, vLb.name,\n data_ip, mgmt_ip)\n\n\t# Now wait for vlb to boot\n def _vLb_soap():\n while True:\n try:\n impl = driver_impl.BrocadeAdxDeviceDriverImpl(\n self.conf.brocade_vlb.username,\n self.conf.brocade_vlb.password,\n mgmt_ip)\n impl.create_pool(pool['pool'])\n impl.ifconfig_e1(data_ip,subnet['subnet']['cidr'])\n impl.create_static_route('0.0.0.0','0',subnet['subnet']['gateway_ip'])\n impl.enable_source_nat()\n except Exception as e:\n LOG.debug('vLB Driver::Load Balancer instance %s' % e)\n yield self.conf.brocade_vlb.vlb_poll_interval\n continue\n break\n self._wait(_vLb_soap, timeout=self.conf.brocade_vlb.vlb_boot_timeout)\n\n LOG.info(_(\"vLB Driver:vLB successfully deployed and configured\"))",
"def view_instance(self, instance_name, check=True):\n self.page_instances().table_instances.row(\n name=instance_name).link_instance.click()\n\n if check:\n assert self.app.page_instance.info_instance.label_name.value \\\n == instance_name",
"def restart_cluster():\n\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"cluster\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-M\")\n\n return _subprocess(cmd)",
"def failover_replica(self) -> pulumi.Output['outputs.InstanceFailoverReplicaResponse']:\n return pulumi.get(self, \"failover_replica\")",
"def test_start_stop(self):\n if not os.path.isfile(twillm.CONFIG_FILE):\n raise EnvironmentError(\"'%s' config file not found\" % \\\n twillm.CONFIG_FILE)\n\n twillm.use_aws_creds('me')\n\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()\n twillm.startinstance('ubuntu1010x64')\n assert twillm.showinstances() == 1, 'there should be 1 instance ' \\\n 'running, there are %d' % twillm.showinstances()\n \n twillm.stopinstances()\n assert twillm.showinstances() == 0, 'there should be 0 instances ' \\\n 'running, there are %d' % twillm.showinstances()",
"def show_instance(name, call=None):\n if call != \"action\":\n raise SaltCloudSystemExit(\n \"The show_instance action must be called with -a or --action.\"\n )\n\n nodes = list_nodes_full()\n __utils__[\"cloud.cache_node\"](nodes[name], _get_active_provider_name(), __opts__)\n return nodes[name]",
"def test_assign_configuration_to_valid_instance(self):\n print(\"instance_info.id: %s\" % instance_info.id)\n print(\"configuration_info: %s\" % configuration_info)\n print(\"configuration_info.id: %s\" % configuration_info.id)\n config_id = configuration_info.id\n instance_info.dbaas.instances.modify(instance_info.id,\n configuration=config_id)\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 202)",
"def stop_instance(tcserver_dir, instance_name=\"instance1\"):\n print(\"Stopping a tcServer instance...\")\n\n pushdir(tcserver_dir)\n subprocess.call([\"./tcruntime-ctl.sh\", instance_name, \"stop\"])\n popdir()",
"def configure_cluster(ctx, zone, db_instance_name):\n ctx.run(init_pg_servers_play_run(zone, db_instance_name), pty=True, echo=True)",
"def createInstance(ec2,ami,nb_nodes,placement,instance_type,key,sg,user_data=None):\n\n reservation = ec2.run_instances(ami,min_count=nb_nodes,max_count=nb_nodes,placement = placement,key_name=key,security_groups=[sg],instance_type=instance_type,user_data=user_data)\n instance = reservation.instances[0]\n return instance",
"def instance_view(self) -> 'outputs.DedicatedHostInstanceViewResponse':\n return pulumi.get(self, \"instance_view\")",
"def start_stop_instances(instances, schedule):\n for reservation in instances:\n for instance in reservation.instances:\n region = instance.placement\n if instance.state == 'running' and _get_desired_state(schedule) == 'stop':\n print \"Should stop \" + instance.id + \".\"\n instance.stop()\n elif instance.state == 'stopped' and _get_desired_state(schedule) == 'start':\n print \"Should start \" + instance.id + \".\"\n instance.start()\n else:\n print \"Nothing to do.\"",
"def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False",
"def running_cluster(request):\n cluster_name = 'running-cluster-' + random_string()\n launch_cluster(\n cluster_name=cluster_name,\n instance_type=request.param.instance_type,\n spark_version=request.param.spark_version,\n spark_git_commit=request.param.spark_git_commit)\n\n if request.param.restarted:\n stop_cluster(cluster_name)\n start_cluster(cluster_name)\n\n def destroy():\n p = subprocess.run([\n 'flintrock', 'destroy', cluster_name, '--assume-yes'])\n assert p.returncode == 0\n request.addfinalizer(destroy)\n\n return cluster_name",
"def do_instance_show(cs, args):\n try:\n instance = cs.instances.detail(args.instance)\n except exceptions.NotFound as e:\n msg = \"No server with an id of '%s' exists\" % args.instance\n e.message = msg\n raise\n\n _print_server_details(instance)",
"def _choose_among_running_instances(self):\n\n instances = self.compute.get_running_instances_ids()\n\n # No instances\n if not instances:\n print 'You do not have any running instances!'\n return None\n\n # List the name of the instances\n print 'Choose an instance:'\n for i, instance in enumerate(instances):\n print '%d) %s' % ((i + 1), instance)\n print\n\n # Choose an instance\n instance_id = ''\n while True:\n\n choice = raw_input(\"Instance target number or ID (empty to cancel): \")\n\n # Cancel\n if not choice:\n return None\n\n # Valid choice\n if choice in instances:\n instance_id = choice\n break\n choice = int(choice)\n if 1 <= choice <= len(instances):\n instance_id = instances[choice - 1]\n break\n\n # Invalid option\n print 'Incorrect option!'\n continue\n\n print\n return instance_id",
"def start_instance(ec2_client, instances):\n # get a list of instance ids\n instances_ids = [i.instance_id for i in instances]\n \n # start the instances\n print(\"\\n===Creating EC2 instance.\")\n ec2_client.start_instances(InstanceIds=instances_ids)\n \n # wait till instance is ready\n waiter = ec2_client.get_waiter(\"instance_running\")\n waiter.wait(InstanceIds=instances_ids)\n print(\"===EC2 instance is ready!\")",
"def nfvi_cold_migrate_instance(instance_uuid, callback, to_host_name=None,\n context=None):\n if context is None:\n cmd_id = _compute_plugin.invoke_plugin_expediate(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n else:\n cmd_id = _compute_plugin.invoke_plugin(\n 'cold_migrate_instance', instance_uuid, to_host_name, context,\n callback=callback)\n return cmd_id",
"def _ensureComponentRunning(self, shouldBeRunning):\n for instance in shouldBeRunning:\n self.log.info(\"Starting instance %s\" % instance)\n system, name = instance.split('__')\n if self.controlComponents:\n res = self.sysAdminClient.startComponent(system, name)\n if not res['OK']:\n self.logError(\"Failed to start component:\", \"%s: %s\" % (instance, res['Message']))\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance was down, started instance\"\n else:\n self.accounting[instance][\"Treatment\"] = \"Instance is down, should be started\"",
"def test_launch_volume_as_instance(self, volume, instances_steps,\n volumes_steps):\n instance_name = next(generate_ids('instance'))\n volumes_steps.launch_volume_as_instance(\n volume.name, instance_name, network_name=INTERNAL_NETWORK_NAME)\n\n instances_steps.page_instances().table_instances.row(\n name=instance_name).wait_for_status('Active')\n instances_steps.delete_instance(instance_name)",
"def test_run_terminate_no_image(self):\n params = {'image_ref': ''}\n instance = self._create_fake_instance_obj(params)\n self.compute.build_and_run_instance(self.context, instance, {}, {}, {},\n [], block_device_mapping=[])\n self._assert_state({'vm_state': vm_states.ACTIVE,\n 'task_state': None})\n\n self.compute.terminate_instance(self.context, instance, [])\n instances = db.instance_get_all(self.context)\n self.assertEqual(len(instances), 0)",
"def pause(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if vmrun.pause() is None:\n puts_err(colored.red(\"Not paused\", vmrun))\n else:\n puts_err(colored.yellow(\"Paused\", vmrun))",
"def power_off(self, instance, node=None):\n if not node:\n node = _get_baremetal_node_by_instance_uuid(instance['uuid'])\n pm = get_power_manager(node=node, instance=instance)\n pm.deactivate_node()\n if pm.state != baremetal_states.DELETED:\n raise exception.InstancePowerOffFailure(_(\n \"Baremetal power manager failed to stop node \"\n \"for instance %r\") % instance['uuid'])\n pm.stop_console()",
"def terminateInstance(region,zone,instance_id):\n\ttry:\n\t\tec2 = boto.ec2.connect_to_region(region+'-'+zone)\n\t\tec2.terminate_instances(instance_ids=[instance_id])\n\t\treturn True\n\texcept Exception as e:\n\t\tlogError(e)\n\t\treturn False",
"def lock_instance(self, instance_name, check=True):\n with self.page_instances().table_instances.row(\n name=instance_name).dropdown_menu as menu:\n menu.button_toggle.click()\n menu.item_lock.click()\n\n if check:\n self.close_notification('success')",
"def create_instance(self, nova, image_name, instance_name, flavor):\n image = nova.images.find(name=image_name)\n flavor = nova.flavors.find(name=flavor)\n instance = nova.servers.create(name=instance_name, image=image,\n flavor=flavor)\n\n count = 1\n status = instance.status\n while status != 'ACTIVE' and count < 60:\n time.sleep(3)\n instance = nova.servers.get(instance.id)\n status = instance.status\n self.log.debug('instance status: {}'.format(status))\n count += 1\n\n if status != 'ACTIVE':\n self.log.error('instance creation timed out')\n return None\n\n return instance",
"def quick_instance(self, name, image, instance_type, env_tag='dev', zone_tag='starwatts', os_tag='debian', sg_id=None,\n private=True, extra_sg_ids=None, extra_tags=None, terminate_on_shutdown=False,\n debug=False):\n # Debug setting\n if debug:\n logging.basicConfig(level=logging.DEBUG)\n\n # Preliminary tests\n try:\n ami = self.get_image(image_id=image)\n except EC2ResponseError:\n logging.error(\"The image {} could not be found. Aborting.\".format(image))\n return\n print(\"Using AMI {} : {}\".format(image, ami.name))\n if len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'running'})) > 0 or \\\n len(self.get_only_instances(filters={'tag:name': name, 'instance-state-name': 'stopped'})) > 0:\n logging.error(\"An instance with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No instance has the same 'name' tag.\")\n if self.keypair_exists(name):\n logging.error(\"A keypair with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No keypair was found with the same name.\")\n if sg_id is None:\n if self.security_group_exists(name=name):\n logging.error(\"A security group with the same name ({}) already exists. Aborting.\".format(name))\n return\n logging.debug(\"Test passed : No security group was found with the same name.\")\n\n # Tags generation\n logging.debug(\"Generating tags to apply.\")\n tags = dict(name=name, os=os_tag, env=env_tag, zone=zone_tag, privacy='true' if private else 'false')\n if extra_tags is not None:\n tags.update(extra_tags)\n print(\"Tags : {}\".format(tags))\n\n # Fetching needed security groups (bastion and zabbix)\n standard_sg = self.get_all_security_groups(groupnames=['standard'])\n if len(standard_sg) != 1:\n logging.error(\"Multiple or no security group was found for the 'bastion' search. Aborting.\")\n return\n standard_sg = standard_sg[0]\n logging.debug(\"The following security group was found for 'standard : {} {}\".format(standard_sg.id,\n standard_sg.description))\n\n # Security group creation\n if sg_id is None:\n sg = self.create_security_group(name, \"SG applied to {} VM\".format(name))\n sg_id = sg.id\n\n sg_ids = [sg_id, standard_sg.id, ]\n # Using the extra security groups if any\n if extra_sg_ids is not None:\n logging.debug(\"Extra security groups to add : {}\".format(extra_sg_ids))\n sg_ids.extend(extra_sg_ids)\n logging.debug(\"Security Groups : {}\".format(sg_ids))\n\n user_data = \"-----BEGIN OUTSCALE SECTION-----\\nprivate_only=true\\n-----END OUTSCALE SECTION-----\" if private else \"\"\n logging.debug(\"Creating keypair.\")\n kp = self.create_key_pair(key_name=name)\n fp = os.path.join(os.path.expanduser('~/.ssh'), '%s.pem' % kp.name)\n with open(fp, 'wb') as fd:\n fd.write(bytes(kp.material, \"UTF-8\"))\n logging.debug(\"Keypair written to ~/.ssh/{}.pem\".format(name))\n\n resa = self.run_instances(image_id=image, key_name=name, security_groups=sg_ids, instance_type=instance_type,\n user_data=user_data,\n instance_initiated_shutdown_behavior='terminate' if terminate_on_shutdown else 'stop')\n inst = resa.instances[0]\n logging.debug(\"Adding tags to the newly created machine.\")\n inst.add_tags(tags)\n return inst",
"def test_ec2_up_no_instance(runner, ec2):\n result = runner.invoke(cli.cli, ['ec2', 'up', '-i', 'dummy'])\n assert result.exit_code == 2"
] |
[
"0.6103812",
"0.6096315",
"0.60282797",
"0.5838405",
"0.5826192",
"0.57585657",
"0.5728754",
"0.5659324",
"0.5638693",
"0.5576284",
"0.55599195",
"0.554497",
"0.554396",
"0.55413884",
"0.5492706",
"0.54776263",
"0.5457303",
"0.5453998",
"0.54452205",
"0.5434242",
"0.5428812",
"0.542578",
"0.53939855",
"0.53932935",
"0.5379554",
"0.53741324",
"0.53683114",
"0.53553444",
"0.53550524",
"0.53342515",
"0.5320723",
"0.5315802",
"0.5307694",
"0.53037935",
"0.53028154",
"0.5292922",
"0.5275318",
"0.52706665",
"0.52641267",
"0.5262683",
"0.52572346",
"0.5243125",
"0.52418864",
"0.5225034",
"0.52249634",
"0.52125555",
"0.5189232",
"0.5187228",
"0.51804364",
"0.51803565",
"0.5177075",
"0.51640856",
"0.5157233",
"0.5155122",
"0.51424897",
"0.5141274",
"0.513783",
"0.5136691",
"0.513227",
"0.51277107",
"0.5121559",
"0.5119916",
"0.5117166",
"0.51170045",
"0.5100153",
"0.5100115",
"0.50975764",
"0.5096875",
"0.50791",
"0.5069631",
"0.50653076",
"0.50627875",
"0.50565076",
"0.5055254",
"0.50478375",
"0.50373626",
"0.5036676",
"0.50285935",
"0.50252837",
"0.50163454",
"0.5008845",
"0.50081223",
"0.5006702",
"0.5005379",
"0.49919572",
"0.49845204",
"0.4970847",
"0.4965872",
"0.49637097",
"0.49636313",
"0.49545226",
"0.49535587",
"0.49497423",
"0.49387133",
"0.49380082",
"0.49341375",
"0.49316606",
"0.4929528",
"0.49252832",
"0.49139285",
"0.49076462"
] |
0.0
|
-1
|
You can create multiple tags and bind them to multiple instances. This allows you to classify and filter instances by tag. A tag consists of a key and a value. Each key must be unique within a region for an Alibaba Cloud account, but different keys can have the same value. If the tag that you specify does not exist, it is automatically created and bound to the specified instance. If a tag that has the same key is already bound to the instance, the new tag overwrites the existing tag. You can bind up to 20 tags to each instance and bind tags to up to 50 instances in each call.
|
def tag_resources_with_options(
self,
request: dds_20151201_models.TagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.TagResourcesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.region_id):
query['RegionId'] = request.region_id
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='TagResources',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.TagResourcesResponse(),
self.call_api(params, req, runtime)
)
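
A minimal usage sketch, assuming an already initialized client object that exposes tag_resources_with_options, and assuming the generated TagResourcesRequest / TagResourcesRequestTag model names and import paths follow this SDK's usual conventions; the region ID, instance IDs, and tag key/value below are placeholders.

# Hypothetical usage sketch: bind one tag to two instances in a single call.
# `client` is assumed to be an initialized instance of the class that defines
# tag_resources_with_options (endpoint and credentials configured elsewhere).
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_util import models as util_models

request = dds_20151201_models.TagResourcesRequest(
    region_id='cn-hangzhou',                                # placeholder region
    resource_type='INSTANCE',
    resource_id=['dds-bp1xxxxxxxx1', 'dds-bp1xxxxxxxx2'],   # placeholder instance IDs
    tag=[
        # Tag model name assumed from the SDK's naming convention.
        dds_20151201_models.TagResourcesRequestTag(key='env', value='staging'),
    ],
)
response = client.tag_resources_with_options(request, util_models.RuntimeOptions())
print(response.body)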
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')",
"def _BindSecureTagsToInstances(\n network_name, project, tag_mapping_file_name, compute_client\n):\n tag_mapping = _ReadTagMapping(tag_mapping_file_name)\n if not tag_mapping:\n return\n\n vm_instances = _GetInstancesInNetwork(project, network_name, compute_client)\n\n for vm in vm_instances:\n _BindTagsToInstance(tag_mapping, vm)\n _BindServiceTagsToInstance(tag_mapping, vm)",
"def test_can_query_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, get_instances_by_tag_type, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n tag1 = {'fsimcluster': 'testcluster'}\n type = 'f1.2xlarge'\n\n # create an instance with only a single tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags=tag1)\n instances.should.have.length_of(1)\n\n tag2 = { 'secondtag': 'secondvalue' }\n # create an instance with additional tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={**tag1, **tag2})\n instances.shouldnt.be.empty\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)\n\n # get_instances_by_tag_type with both tags should only return one instance\n instances = get_instances_by_tag_type({**tag1, **tag2},type)\n list(instances).should.have.length_of(1)\n\n # and that instance should be the one with both tags\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n operation_params = {\n 'InstanceIds': ids\n }\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.equal({**tag1, **tag2})\n\n # get_instances_by_tag_type with only the original tag should return both instances\n instances = get_instances_by_tag_type(tag1,type)\n list(instances).should.have.length_of(2)",
"def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)",
"def create_tags(resource_id, key, value):\n response = EC2.create_tags(\n Resources=[\n resource_id,\n ],\n Tags=[\n {\n 'Key': key,\n 'Value': value\n },\n ]\n )\n return response",
"def tag_instance_subnet(self, tags):\n self._request({\"instance-subnet-tags\": dict(tags)})",
"def create_tags(tags_list):\n\n Tags.create_multiple(tags_list)",
"def attachInstanceTags(instance_id, tags):\n \n empty = False\n lambda_client = boto3.client('lambda')\n data = {\n 'comp_name': \"attachInstanceTags\", \n 'action': \"attach tags\", \n 'level': \"info\", \n 'msg': \"attached \" + str(tags) + \" to instance \" + instance_id\n } \n try:\n client = boto3.client('ec2')\n response = client.create_tags(\n Resources=[instance_id],\n Tags= tags\n )\n print(\"Attached tags to instance\")\n except ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':\n print(\"No such instance exists\")\n empty = True\n else:\n print(\"Error attaching tags to instance: \" + str(e))\n \n if (not empty):\n invoke_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"notify_snitch\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(data)\n )",
"def get_instances_by_tags(self, tags):\n return self.get_only_instances(filters={'tag:{}'.format(key): val for key, val in tags.items()})",
"def create_or_update_tags(self, Tags):\n tag = Tags[0]\n asg_name = tag['ResourceId']\n ec2_tag = {\n 'Key': tag['Key'],\n 'Value': tag['Value']\n }\n try:\n response = self.asg.create_or_update_tags(\n Tags=Tags\n )\n except Exception as e:\n logger.error('Unknown Error: %s', str(e))\n else:\n logger.info(response)\n\n asg_instances = self.get_asg_instance_ids(asg_name)\n return EC2Wrapper(self.session).create_tags(Resources=asg_instances, Tags=[ec2_tag])",
"def AddInstanceTags(self, instance, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def _BindTagToInstance(tag_value, instance):\n messages = rm_tags.TagMessages()\n resource_name = _GetFullCanonicalResourceName(instance)\n\n tag_binding = messages.TagBinding(parent=resource_name, tagValue=tag_value)\n binding_req = messages.CloudresourcemanagerTagBindingsCreateRequest(\n tagBinding=tag_binding\n )\n\n location = _GetInstanceLocation(instance)\n\n with endpoints.CrmEndpointOverrides(location):\n try:\n op = rm_tags.TagBindingsService().Create(binding_req)\n if not op.done:\n operations.WaitForReturnOperation(\n op,\n 'Waiting for TagBinding for parent [{}] and tag value [{}] to be '\n 'created with [{}]'.format(resource_name, tag_value, op.name),\n )\n except Exception as e: # pylint: disable=broad-except\n log.status.Print('Tag binding could not be created: ' + repr(e))",
"def load_instances_tags(instance_id=None):\n loader = TagLoader(override_instance_id=instance_id)\n return loader.load_tags()",
"def ex_create_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'CreateTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def add_tag (self,tag,key):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if tag in self.tag_dict:\r\n\r\n self.tag_dict[tag].add(key)\r\n\r\n else:\r\n\r\n self.tag_dict[tag] = {key}\r\n\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO tags_to_keys \"\r\n +\"(notebook, tag, keyword) \"\r\n +\"VALUES (?,?,?);\",value_tuple)",
"def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value",
"def initialize_tags(self):\n\t\tfor tag_enum in Tags:\n\t\t\ttag = Tag(id=tag_enum.value, description=tag_enum.name)\n\t\t\tself.session.add(tag)\n\t\t\tself.session.commit()",
"def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)",
"def CreateTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'ec2':\n response = Client.create_tags(\n Resources = [\n\t\t ResourceId\n\t\t],\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'efs':\n response = Client.create_tags(\n FileSystemId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'redshift':\n response = Client.create_tags(\n ResourceName = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'workspaces':\n response = Client.create_tags(\n ResourceId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def create_tags(configurationIds=None, tags=None):\n pass",
"def upsert_tags(self, entry, tags):\n if not tags:\n return\n\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for tag in tags:\n logging.info('Processing Tag from Template: %s ...', tag.template)\n\n tag_to_create = tag\n tag_to_update = None\n for persisted_tag in persisted_tags:\n # The column field is not case sensitive.\n if tag.template == persisted_tag.template and \\\n tag.column.lower() == persisted_tag.column.lower():\n\n tag_to_create = None\n tag.name = persisted_tag.name\n if not self.__tag_fields_are_equal(tag, persisted_tag):\n tag_to_update = tag\n break\n\n if tag_to_create:\n created_tag = self.create_tag(entry.name, tag_to_create)\n logging.info('Tag created: %s', created_tag.name)\n elif tag_to_update:\n self.update_tag(tag_to_update)\n logging.info('Tag updated: %s', tag_to_update.name)\n else:\n logging.info('Tag is up-to-date: %s', tag.name)",
"def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def AddTags(resource_id, region, **kwargs):\n if not kwargs:\n return\n\n describe_cmd = SoftLayer_PREFIX + [\n '--format',\n 'json',\n 'vs',\n 'detail',\n '%s' % resource_id]\n\n stdout, _ = IssueRetryableCommand(describe_cmd)\n response = json.loads(stdout)\n tags = response['tags']\n\n tag_cmd = SoftLayer_PREFIX + [\n 'vs',\n 'edit']\n\n if tags is not None:\n for tag in tags:\n tag_cmd = tag_cmd + ['--tag', '{0}'.format(tag)]\n\n for key, value in kwargs.items():\n tag_cmd = tag_cmd + ['--tag', '{0}:{1}'.format(key, value)]\n\n tag_cmd = tag_cmd + ['{0}'.format(resource_id)]\n IssueRetryableCommand(tag_cmd)",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def tag(self, uuid, tags):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.tag(uuid, tags)",
"def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': 
security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances",
"def aws_tags(self, values):\n if not getattr(self, \"tags\", None):\n self.tags = {}\n\n tags = defaultdict(list)\n\n for tag in values:\n tags[tag[\"Key\"]].append(tag[\"Value\"])\n\n self.tags.update(tags)\n self._transform_known_tags()",
"def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})",
"def create_tags(ResourceArn=None, Tags=None):\n pass",
"def tags():",
"def initiate_new_tag (self,tag,key):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n self.tag_dict[tag] = {key}\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO tags_to_keys\"\r\n +\" (notebook, tag, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)",
"def AddClusterTags(self, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT, \"/%s/tags\" % GANETI_RAPI_VERSION,\n query, None)",
"def add_tag(names, tag):\n for name in names:\n b = Box.query.filter_by(name=name).first()\n b.tags.add(tag)\n db.session.commit()",
"def add_tagging(self, task_instance):",
"def create(self, params={}, **options):\n return self.client.post(\"/tags\", params, **options)",
"def create_spot_instances(ec2, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None) -> Iterable[List[Boto2Instance]]:\n def spotRequestNotFound(e):\n return getattr(e, 'error_code', None) == \"InvalidSpotInstanceRequestID.NotFound\"\n\n for attempt in retry_ec2(retry_for=a_long_time,\n retry_while=inconsistencies_detected):\n with attempt:\n requests = ec2.request_spot_instances(\n price, image_id, count=num_instances, **spec)\n\n if tags is not None:\n for requestID in (request.id for request in requests):\n for attempt in retry_ec2(retry_while=spotRequestNotFound):\n with attempt:\n ec2.create_tags([requestID], tags)\n\n num_active, num_other = 0, 0\n # noinspection PyUnboundLocalVariable,PyTypeChecker\n # request_spot_instances's type annotation is wrong\n for batch in wait_spot_requests_active(ec2,\n requests,\n timeout=timeout,\n tentative=tentative):\n instance_ids = []\n for request in batch:\n if request.state == 'active':\n instance_ids.append(request.instance_id)\n num_active += 1\n else:\n logger.info(\n 'Request %s in unexpected state %s.',\n request.id,\n request.state)\n num_other += 1\n if instance_ids:\n # This next line is the reason we batch. It's so we can get multiple instances in\n # a single request.\n yield ec2.get_only_instances(instance_ids)\n if not num_active:\n message = 'None of the spot requests entered the active state'\n if tentative:\n logger.warning(message + '.')\n else:\n raise RuntimeError(message)\n if num_other:\n logger.warning('%i request(s) entered a state other than active.', num_other)",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"def AddTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'es':\n response = Client.add_tags (\n ARN = ResourceId,\n TagList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'emr':\n response = Client.add_tags (\n ResourceId = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'cloudtrail':\n response = Client.add_tags (\n ResourceId = ResourceId,\n TagsList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'sagemaker':\n response = Client.add_tags (\n ResourceArn = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'datapipeline':\n response = Client.add_tags (\n pipelineId = ResourceId,\n tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def create_tags(apps, _):\n type_model = apps.get_model(\"projects\", \"Type\")\n tag_model = apps.get_model(\"projects\", \"Tag\")\n\n # Tags which are a 1:1 migration\n global education_tag\n global covid_tag\n global innovative_tag\n global other_tag\n education_tag = tag_model(\n name=\"Computing Education\",\n description=\"Seeding inclusive computing education for the next generation \"\n \"and all computer-science training\",\n )\n covid_tag = tag_model(\n name=\"COVID\",\n description=\"Related to COVID-19\",\n )\n innovative_tag = tag_model(\n name=\"Innovative Application\", description=\"Applications for domain sciences\"\n )\n other_tag = tag_model(\n name=\"Other\",\n description=\"My project research area doesn’t fit in any of \"\n \"the predefined categories\",\n )\n\n tags = [\n education_tag,\n covid_tag,\n innovative_tag,\n other_tag,\n tag_model(\n name=\"Computer Architecture\",\n description=\"Designing computer systems optimized for high performance, \"\n \"energy efficiency, and scalability\",\n ),\n tag_model(\n name=\"Data Science\",\n description=\"Developing algorithms for managing and analyzing data at scale\",\n ),\n tag_model(\n name=\"Database Systems\",\n description=\"Designing systems for managing and storing data at scale\",\n ),\n tag_model(\n name=\"Human Computer Interaction\",\n description=\"Exploring the interfaces between people and technologies\",\n ),\n tag_model(\n name=\"AI and Machine Learning\",\n description=\"Foundations and applications of computer algorithms making \"\n \"data-centric models, predictions, and decisions\",\n ),\n tag_model(\n name=\"Networking\",\n description=\"Analysis, design, implementation, and use of local, \"\n \"wide-area, and mobile networks that link computers together\",\n ),\n tag_model(\n name=\"Programming Languages\",\n description=\"Devising new and better ways of programming the computers\",\n ),\n tag_model(\n name=\"Robotics\",\n description=\"Design, construction, operation, and use of robots\",\n ),\n tag_model(\n name=\"Scientific and High-Performance Computing\",\n description=\"Scientific discovery at the frontiers of computational \"\n \"performance, intelligence, and scale\",\n ),\n tag_model(\n name=\"Security and Privacy\",\n description=\"Understanding and defending against emerging threats in our \"\n \"increasingly computational world\",\n ),\n tag_model(\n name=\"Software Engineering\",\n description=\"Design, development, testing, and maintenance of \"\n \"software applications\",\n ),\n tag_model(\n name=\"Distributed Systems\",\n description=\"Harness the power of multiple computational units\",\n ),\n tag_model(\n name=\"Operating Systems\",\n description=\"Analysis, design, and implementation of operating systems\",\n ),\n tag_model(\n name=\"Storage Systems\",\n description=\"Capturing, managing, securing, and prioritizing data\",\n ),\n tag_model(\n name=\"Cloud Computing\",\n description=\"Delivering computing services over the Internet to offer \"\n \"faster innovation, flexible resources, and economies of scale\",\n ),\n tag_model(\n name=\"Edge Computing\",\n description=\"Bring applications closer to data sources such as IoT \"\n \"devices or local edge servers\",\n ),\n tag_model(\n name=\"Vision and Graphics\",\n description=\"Creating and analyzing data from the visual world, \"\n \"and visually understanding complex data\",\n ),\n tag_model(\n name=\"Theory of Computation\",\n description=\"Mathematical foundations of computation, including \"\n \"algorithm design, 
complexity and logic\",\n ),\n tag_model(\n name=\"Daypass\",\n description=\"Daypass project\",\n expose=False,\n ),\n ]\n\n tag_model.objects.bulk_create(tags)\n\n if type_model.objects.count() == 0:\n return\n covid_type = type_model.objects.get(name=\"COVID\")\n research_type = type_model.objects.get(name=\"CS Research\")\n education_type = type_model.objects.get(name=\"Education\")\n innovative_type = type_model.objects.get(name=\"Innovative Application\")\n\n # Gather the old tags. We have to remove the type model from the project model\n # to add the projects to the new tag model,\n # So all we do is collect them here, and then move them later.\n global old_covid_projects\n global old_research_projects\n global old_education_projects\n global old_innovative_projects\n old_covid_projects = list(covid_type.project_type.all())\n old_research_projects = list(research_type.project_type.all())\n old_education_projects = list(education_type.project_type.all())\n old_innovative_projects = list(innovative_type.project_type.all())",
"def add_tags(self, image_id, tags):\n\t\tfor tag in tags:\n\t\t\timage_tag = ImageTag(image_id=image_id, tag_id=tag)\n\t\t\tself.session.add(image_tag)\n\n\t\tself.session.commit()",
"def tag(profile, internet_gateway, key, value):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"Resources\"] = [internet_gateway]\n params[\"Tags\"] = [{\"Key\": key, \"Value\": value}]\n return client.create_tags(**params)",
"def add_tags_to_resource(ResourceId=None, Tags=None):\n pass",
"def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def createPatchTag(tags, instance_id, nt_id):\n\n client = boto3.client('ssm')\n response = client.describe_instance_information(\n InstanceInformationFilterList=[\n {\n 'key': 'InstanceIds',\n 'valueSet': [instance_id]\n }\n ]\n )\n patch_tag_value = ''\n platform_name = ''\n if (response['InstanceInformationList']):\n platform_name = response['InstanceInformationList'][0]['PlatformName'] \n if 'Red Hat Enterprise Linux' in platform_name:\n patch_tag_value = 'default-rhel'\n elif 'Windows' in platform_name:\n patch_tag_value = 'default-windows'\n elif 'Ubuntu' in platform_name:\n patch_tag_value = 'default-ubuntu'\n elif 'Centos' in platform_name:\n patch_tag_value = 'default-centos'\n elif 'Amazon Linux 2' in platform_name:\n patch_tag_value = 'default-amazon2'\n elif 'Amazon Linux' in platform_name:\n patch_tag_value = 'default-amazon'\n else:\n print(\"No patch group found for platform\")\n patch_tag_value = 'Not yet populated'\n\n return patch_tag_value",
"def createTag(self, authenticationToken, tag):\r\n pass",
"def register_elb_instances(elbclient, elbname, instance_ids):\r\n Instances = list(map(\r\n lambda x: {'InstanceId': x},\r\n instance_ids\r\n ))\r\n try:\r\n elbclient.register_instances_with_load_balancer(\r\n LoadBalancerName=elbname,\r\n Instances=Instances,\r\n DryRun=True\r\n )\r\n except Exception as ex:\r\n print(ex.message)\r\n return False\r\n return True",
"def pool_into(self, target):\n for taggable in taggables().values():\n for t in taggable.by_user(self.owner).filter(tags=self):\n t.tag(target)",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def AddTagsToResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'rds':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'elasticache':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'ds':\n response = Client.add_tags_to_resource (\n ResourceId = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)",
"def add_tags(ResourceArn=None, Tags=None):\n pass",
"def start(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.start_instances(InstanceIds=[instance_id])\n print(f\"Start instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def tag_resource(resourceArn=None, tags=None):\n pass",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})",
"def sample_tags(user, name='Main cuisine'):\n return Tag.objects.create(user=user, name=name)",
"def create_instances(ec2_resource: ServiceResource,\n image_id: str,\n key_name: str,\n instance_type: str,\n num_instances: int = 1,\n security_group_ids: Optional[List] = None,\n user_data: Optional[Union[str, bytes]] = None,\n block_device_map: Optional[List[Dict]] = None,\n instance_profile_arn: Optional[str] = None,\n placement_az: Optional[str] = None,\n subnet_id: str = None,\n tags: Optional[Dict[str, str]] = None) -> List[dict]:\n logger.info('Creating %s instance(s) ... ', instance_type)\n\n if isinstance(user_data, str):\n user_data = user_data.encode('utf-8')\n\n request = {'ImageId': image_id,\n 'MinCount': num_instances,\n 'MaxCount': num_instances,\n 'KeyName': key_name,\n 'SecurityGroupIds': security_group_ids,\n 'InstanceType': instance_type,\n 'UserData': user_data,\n 'BlockDeviceMappings': block_device_map,\n 'SubnetId': subnet_id}\n\n if instance_profile_arn:\n # We could just retry when we get an error because the ARN doesn't\n # exist, but we might as well wait for it.\n wait_until_instance_profile_arn_exists(instance_profile_arn)\n\n # Add it to the request\n request['IamInstanceProfile'] = {'Arn': instance_profile_arn}\n\n if placement_az:\n request['Placement'] = {'AvailabilityZone': placement_az}\n\n if tags:\n # Tag everything when we make it.\n flat_tags = flatten_tags(tags)\n request['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': flat_tags},\n {'ResourceType': 'volume', 'Tags': flat_tags}]\n\n return ec2_resource.create_instances(**prune(request))",
"async def add_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"insert into tags(tag_name, map_id) \n select ?, (select map_id from maps where map_path=?)\n where not exists\n (select * from tags where tag_name = ? and map_id = (select map_id from maps where map_path=?)) \"\"\"\n select(conn, insert_sql, (tag, map_name, tag, map_name))\n await channel.send(f\"Added tags `{' '.join(tags)}` for map {map_name} if it wasn't set\")",
"def create_tags(tags_path: Path, email: str, password: str, host_url: str):\n with open(tags_path) as f:\n tags_json = json.load(f)\n\n client = client_util.make_client(host_url, email, password)\n\n # Build dictionary of tags as they exist on the server, mapped by slug.\n online_tags_resp = api_get_tags.sync_detailed(client=client)\n if online_tags_resp.status_code != HTTPStatus.OK:\n click.echo(f\"Request to get tags failed with status {online_tags_resp}\")\n exit(1)\n online_tags = {\n online_tag.slug: online_tag for online_tag in online_tags_resp.parsed\n }\n\n # Record slugs of tags that failed.\n failures = set()\n\n for tag in tags_json[\"tags\"]:\n slug = tag[\"slug\"]\n name = tag[\"name\"]\n description = tag[\"description\"]\n color = tag.get(\"color\")\n\n if slug in online_tags:\n # Update\n online_tag = online_tags[slug]\n if (\n name == online_tag.name\n and description == online_tag.description\n and (color is None or color == online_tag.color)\n ):\n click.echo(f\"Tag {slug} is already up to date.\")\n else:\n click.echo(f\"Updating tag {slug}\")\n res = api_update_tag.sync_detailed(\n slug,\n client=client,\n json_body=PutTagsTagJsonBody(\n name,\n description,\n color if color else online_tags[slug].color,\n ),\n )\n if res.status_code != HTTPStatus.OK:\n click.echo(f\"Request failed with content={res.content}\")\n failures.add(slug)\n else:\n # Create\n click.echo(f\"Creating tag {slug}\")\n res = api_create_tag.sync_detailed(\n client=client,\n json_body=PostTagsJsonBody(\n name,\n slug,\n description,\n color=color if color else UNSET,\n ),\n )\n if res.status_code != HTTPStatus.CREATED:\n click.echo(f\"Request failed with content={res.content}\", err=True)\n failures.add(slug)\n\n if failures:\n click.echo(f\"Completed with failures: {failures}\", err=True)\n sys.exit(1)",
"def instance(template, name, ami, type, keypair, interfaces,\n availability_zone=None, user_data=None, placement_group=None, role='unknown', iam_role=None,\n volume_size=None, tags=None):\n i = Instance(name, template=template)\n i.ImageId = ami\n i.InstanceType = type\n i.KeyName = Ref(keypair)\n\n i.Tags = Tags(Name=aws_name(i.title))\n if role:\n i.Tags += Tags(Role=role)\n\n if tags:\n i.Tags += Tags(**tags)\n\n if iam_role:\n if isinstance(iam_role, str):\n i.IamInstanceProfile = iam_role\n else:\n i.DependsOn = iam_role.title\n i.IamInstanceProfile = Ref(iam_role)\n\n if availability_zone:\n i.AvailabilityZone = availability_zone\n\n if placement_group:\n i.PlacementGroupName = Ref(placement_group)\n\n if volume_size:\n i.BlockDeviceMappings = [\n BlockDeviceMapping(DeviceName=\"/dev/sda1\", Ebs=EBSBlockDevice(VolumeSize=volume_size))\n ]\n\n if interfaces:\n i.NetworkInterfaces = [NetworkInterfaceProperty(DeviceIndex=index,\n NetworkInterfaceId=Ref(interface))\n for (index, interface) in enumerate(interfaces)]\n\n if user_data:\n i.UserData = Base64(Join('', [line + '\\n' for line in user_data.splitlines()]))\n\n return i",
"def ex_describe_tags(self, node):\n params = { 'Action': 'DescribeTags',\n 'Filter.0.Name': 'resource-id',\n 'Filter.0.Value.0': node.id,\n 'Filter.1.Name': 'resource-type',\n 'Filter.1.Value.0': 'instance',\n }\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n tags = {}\n for element in self._findall(result, 'tagSet/item'):\n key = self._findtext(element, 'key')\n value = self._findtext(element, 'value')\n\n tags[key] = value\n return tags",
"def TagResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'lambda':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n TagName: TagValue\n }\n\t )\n elif self.Service == 'dax':\n response = Client.tag_resource (\n ResourceName = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'directconnect':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'key': TagName,\n\t\t\t'value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'dynamodb':\n response = Client.tag_resource (\n ResourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'kms':\n response = Client.tag_resource (\n KeyId = ResourceId,\n\t\tTags = [\n\t\t {\n 'TagKey': TagName,\n\t\t\t'TagValue': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'apigateway':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\ttags = [\n\t\t {\n\t\t\tTagName: TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'secretsmanager':\n response = Client.tag_resource (\n SecretId = ResourceId,\n\t\tTags = [\n\t\t {\n\t\t\t'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'cloudfront':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n 'Items': [\n\t\t {\n\t\t\t 'Key': TagName,\n\t\t\t 'Value': TagValue\n\t\t }\n ]\n\t\t}\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def GetInstanceTags(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def create_instances_request(nodes, placement_groups=None, exclusive=False):\n assert len(nodes) > 0\n assert len(nodes) <= BULK_INSERT_LIMIT\n # model here indicates any node that can be used to describe the rest\n model = next(iter(nodes))\n partition = lkp.node_partition(model)\n template = lkp.node_template(model)\n region = lkp.node_region(model)\n\n body = NSDict()\n body.count = len(nodes)\n if not exclusive:\n body.minCount = 1\n\n # source of instance properties\n body.sourceInstanceTemplate = template\n\n # overwrites properties accross all instances\n body.instanceProperties = instance_properties(partition, model)\n\n # key is instance name, value overwrites properties\n body.perInstanceProperties = {\n k: per_instance_properties(k, placement_groups) for k in nodes\n }\n\n zones = {\n **{\n f\"zones/{zone}\": {\"preference\": \"ALLOW\"}\n for zone in partition.zone_policy_allow or []\n },\n **{\n f\"zones/{zone}\": {\"preference\": \"DENY\"}\n for zone in partition.zone_policy_deny or []\n },\n }\n if zones:\n body.locationPolicy = {\"locations\": zones}\n\n request = util.compute.regionInstances().bulkInsert(\n project=cfg.project, region=region, body=body.to_dict()\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n f\"new request: endpoint={request.methodId} nodes={to_hostlist(nodes)}\"\n )\n log_api_request(request)\n return request",
"def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})",
"def add_tags_to_photo(self, photo_id, tag_list):\n print('\\nHello from add_tags_to_photo, the tag list is: ', tag_list)\n\n # for each tag\n # check if the tag is in the database already\n # if it is not then add it to the tag table\n for tag in tag_list:\n\n # will return None if the tag is not in the tag table\n # tag_name is the column name\n data = self.db.get_row('tag', 'tag_name', tag)\n\n print('data is', data)\n\n if data is None:\n\n print('\\nthat value {} is not in the db\\n'.format(tag))\n\n self.db.make_query(\n '''\n insert into tag (tag_name, user_id, photos)\n values (\"{}\", \"{}\", {})\n '''.format(\n tag,\n '28035310@N00',\n self.get_photo_count_by_tag(tag)\n )\n )\n\n print('\\nshould be added now...\\n')\n\n if self.db.get_row('tag', 'tag_name', tag):\n print('\\nadded tag, ', tag, '\\n')\n\n # UNIQUE constraint can cause problems here\n # so catch any exceptions\n try:\n # The tag is now in the database.\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n except Exception as e:\n print('Problem adding tag to photo_tag ', e)\n\n data = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n tags_in_data = []\n if len(data) > 0:\n for tag in data:\n tags_in_data.append(tag[1])\n\n print(tags_in_data)\n for tag in tag_list:\n if tag not in tags_in_data:\n return False\n else:\n self.update_photo_count(tag)\n\n return True",
"def tags(self, tags: List[Tag]):\n\n self._tags = tags",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)",
"def startstop_instances(module, ecs, instance_ids, state, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n # Fail unless the user defined instance tags\n if not instance_tags:\n module.fail_json(msg='instance_ids should be a list of instances, aborting')\n\n # To make an ECS tag filter, we need to prepend 'tag:' to each key.\n # An empty filter does no filtering, so it's safe to pass it to the\n # get_all_instances method even if the user did not specify instance_tags\n filters = []\n if instance_tags:\n for inst_tag in instance_tags:\n tag = {}\n tag[\"tag:\" + inst_tag['tag_key']] = inst_tag['tag_value']\n filters.append(tag)\n # Check (and eventually change) instances attributes and instances state\n running_instances_array = []\n region, connect_args = get_acs_connection_info(module)\n connect_args['force'] = module.params.get('force', None)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state != state:\n instance_dict_array.append(get_instance_info(inst))\n try:\n if state == 'running':\n inst.start()\n elif state == 'restarted':\n inst.reboot()\n else:\n inst.stop()\n except ECSResponseError as e:\n module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, instance_ids)",
"def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")",
"def add_tagging(self, task_instance):\n dag_run = task_instance.dag_run\n task = task_instance.task\n\n with sentry_sdk.configure_scope() as scope:\n for tag_name in self.SCOPE_TASK_INSTANCE_TAGS:\n attribute = getattr(task_instance, tag_name)\n scope.set_tag(tag_name, attribute)\n for tag_name in self.SCOPE_DAG_RUN_TAGS:\n attribute = getattr(dag_run, tag_name)\n scope.set_tag(tag_name, attribute)\n scope.set_tag(\"operator\", task.__class__.__name__)",
"def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))",
"def register_instances(self, instances):\r\n if isinstance(instances, str) or isinstance(instances, unicode):\r\n instances = [instances]\r\n new_instances = self.connection.register_instances(self.name, instances)\r\n self.instances = new_instances",
"def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))",
"def multi_tag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_username = validation.cast_integer(tag_userid, 'userid')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"about to tag %d images with %d tags\" % (len(image_ids), len(tag_names)))\n\t\tfor id in image_ids:\n\t\t\ttry:\n\t\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t\texcept errors.ValidationError, ex:\n\t\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t\tself.log.debug(\"image %s\" % id)\n\n\t\t# do all inserts in a single transaction\n\t\tdef tag_txn(txn, owner, tagger, ids, tags):\n\t\t\tfor id in ids:\n\t\t\t\tid = validation.cast_integer(id, 'id')\n\t\t\t\tfor tag in tags:\n\t\t\t\t\ttag = tag.lower()\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_insert_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_ids, tag_names)",
"def create_tags(self, resource_ids, tags):\r\n params = {}\r\n self.build_list_params(params, resource_ids, 'ResourceId')\r\n self.build_tag_param_list(params, tags)\r\n return self.get_status('CreateTags', params, verb='POST')",
"def load_tags(tag_list):\n\n tag_insert = \"INSERT INTO release_tag VALUES\" \\\n \" (?, ?, ?, ?, ?, ?)\"\n dbutils.load_list(tag_insert, tag_list, DATABASE_FILE)",
"def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)",
"def add_tag(args):\n\n if not args.nodespec and not args.software_profile and \\\n not args.hardware_profile:\n sys.stderr.write('Error: must specify --nodes'\n '/--software-profile/--hardware-profile\\n')\n sys.stderr.flush()\n sys.exit(1)\n\n session = DbManager().openSession()\n\n try:\n nodes = []\n softwareprofiles = []\n hardwareprofiles = []\n\n if args.nodespec:\n nodespec = args.nodespec.replace('*', '%')\n\n nodes = NodesDbHandler().getNodesByNameFilter(\n session, nodespec)\n\n if not nodes:\n sys.stderr.write(\n 'No nodes matching nodespec [{0}]\\n'.format(\n args.nodespec))\n\n sys.stderr.flush()\n\n sys.exit(1)\n\n if args.software_profile:\n softwareprofile_names = args.software_profile.split(',')\n\n for softwareprofile_name in softwareprofile_names:\n softwareprofile = SoftwareProfilesDbHandler().\\\n getSoftwareProfile(session, softwareprofile_name)\n\n softwareprofiles.append(softwareprofile)\n\n if args.hardware_profile:\n hardwareprofile_names = args.hardware_profile.split(',')\n\n for hardwareprofile_name in hardwareprofile_names:\n hardwareprofile = HardwareProfilesDbHandler().\\\n getHardwareProfile(session, hardwareprofile_name)\n\n hardwareprofiles.append(hardwareprofile)\n\n # Create list of 'Tags' database objects\n tag_objs = get_tag_objects(session, args.tags)\n\n # Associate with nodes\n for node in nodes or []:\n for tag_obj in tag_objs:\n if tag_obj in node.tags:\n # Tag already exists\n continue\n\n node.tags.append(tag_obj)\n\n print(node.name, node.tags)\n\n # Associate with software profiles\n for softwareprofile in softwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in softwareprofile.tags:\n continue\n\n softwareprofile.tags.append(tag_obj)\n\n # Associate with hardware profiles\n for hardwareprofile in hardwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in hardwareprofile.tags:\n continue\n\n hardwareprofile.tags.append(tag_obj)\n\n session.commit()\n finally:\n DbManager().closeSession()",
"def _add_tags_to_housekeeper(self, store: bool, tags: List[str]) -> None:\n for tag in tags:\n if store and self.hk.get_tag(name=tag) is None:\n self.hk.add_commit(self.hk.new_tag(tag))",
"def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):\n tags = []\n client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)\n result = client.get_all_tags(filters={\"resource-id\": instance_id})\n if result:\n for tag in result:\n tags.append({tag.name: tag.value})\n else:\n log.info(\"No tags found for instance_id %s\", instance_id)\n return tags"
] |
[
"0.74873763",
"0.65912956",
"0.65561086",
"0.6501582",
"0.6467801",
"0.6455448",
"0.6356754",
"0.63517284",
"0.63239217",
"0.6164876",
"0.60988724",
"0.60375375",
"0.599445",
"0.5963434",
"0.58057624",
"0.57843035",
"0.57726794",
"0.5756584",
"0.5738389",
"0.57328016",
"0.57313013",
"0.57230544",
"0.5686239",
"0.5677894",
"0.56662965",
"0.56567246",
"0.565524",
"0.56531125",
"0.565146",
"0.56451654",
"0.56209767",
"0.56205976",
"0.5612001",
"0.55944926",
"0.5583458",
"0.5570526",
"0.5557534",
"0.55464417",
"0.55440867",
"0.5515398",
"0.55125785",
"0.548511",
"0.5471506",
"0.5458045",
"0.5438132",
"0.5433872",
"0.5424822",
"0.5414369",
"0.5414369",
"0.5414369",
"0.5374374",
"0.5371427",
"0.53688776",
"0.5359795",
"0.53516203",
"0.5345461",
"0.53426623",
"0.53420615",
"0.5325097",
"0.5316941",
"0.5300474",
"0.52976036",
"0.52849084",
"0.52839524",
"0.528006",
"0.5268271",
"0.5254624",
"0.52427626",
"0.52422947",
"0.52421826",
"0.52406996",
"0.523975",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52339345",
"0.5222822",
"0.5211482",
"0.51988596",
"0.5196789",
"0.5185682",
"0.51794845",
"0.5176306",
"0.5167229",
"0.51289487",
"0.51284665",
"0.51248205",
"0.5121572",
"0.5119399",
"0.5118134",
"0.51158065",
"0.5109637",
"0.51061803",
"0.51058537"
] |
0.0
|
-1
|
You can create multiple tags and bind them to multiple instances. This allows you to classify and filter instances by tag. A tag consists of a key and a value. Each key must be unique in a region for an Alibaba Cloud account. Different keys can have the same value. If the tag you specify does not exist, this tag is automatically created and bound to the specified instance. If a tag that has the same key is already bound to the instance, the new tag overwrites the existing tag. You can bind up to 20 tags to each instance. You can bind tags to up to 50 instances each time you call the operation.
|
async def tag_resources_with_options_async(
self,
request: dds_20151201_models.TagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.TagResourcesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.region_id):
query['RegionId'] = request.region_id
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag):
query['Tag'] = request.tag
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='TagResources',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.TagResourcesResponse(),
await self.call_api_async(params, req, runtime)
)
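A minimal usage sketch for the method above, for illustration only: the TagResourcesRequestTag sub-model, the 'INSTANCE' resource type value, the list-typed resource_id, and the placeholder instance IDs are assumptions inferred from the query-building code, not confirmed by this document.

    async def tag_instances_example(client):
        # Hypothetical sketch: TagResourcesRequestTag, 'INSTANCE', and the
        # instance IDs below are assumptions; only TagResourcesRequest and
        # tag_resources_with_options_async come from the code above.
        tag = dds_20151201_models.TagResourcesRequestTag(key='env', value='staging')
        request = dds_20151201_models.TagResourcesRequest(
            region_id='cn-hangzhou',
            resource_type='INSTANCE',
            resource_id=['dds-bp1example01', 'dds-bp1example02'],  # at most 50 instances per call
            tag=[tag],                                             # at most 20 tags per instance
        )
        runtime = util_models.RuntimeOptions()
        return await client.tag_resources_with_options_async(request, runtime)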
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')",
"def _BindSecureTagsToInstances(\n network_name, project, tag_mapping_file_name, compute_client\n):\n tag_mapping = _ReadTagMapping(tag_mapping_file_name)\n if not tag_mapping:\n return\n\n vm_instances = _GetInstancesInNetwork(project, network_name, compute_client)\n\n for vm in vm_instances:\n _BindTagsToInstance(tag_mapping, vm)\n _BindServiceTagsToInstance(tag_mapping, vm)",
"def test_can_query_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, get_instances_by_tag_type, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n tag1 = {'fsimcluster': 'testcluster'}\n type = 'f1.2xlarge'\n\n # create an instance with only a single tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags=tag1)\n instances.should.have.length_of(1)\n\n tag2 = { 'secondtag': 'secondvalue' }\n # create an instance with additional tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={**tag1, **tag2})\n instances.shouldnt.be.empty\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)\n\n # get_instances_by_tag_type with both tags should only return one instance\n instances = get_instances_by_tag_type({**tag1, **tag2},type)\n list(instances).should.have.length_of(1)\n\n # and that instance should be the one with both tags\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n operation_params = {\n 'InstanceIds': ids\n }\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.equal({**tag1, **tag2})\n\n # get_instances_by_tag_type with only the original tag should return both instances\n instances = get_instances_by_tag_type(tag1,type)\n list(instances).should.have.length_of(2)",
"def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)",
"def create_tags(resource_id, key, value):\n response = EC2.create_tags(\n Resources=[\n resource_id,\n ],\n Tags=[\n {\n 'Key': key,\n 'Value': value\n },\n ]\n )\n return response",
"def tag_instance_subnet(self, tags):\n self._request({\"instance-subnet-tags\": dict(tags)})",
"def create_tags(tags_list):\n\n Tags.create_multiple(tags_list)",
"def attachInstanceTags(instance_id, tags):\n \n empty = False\n lambda_client = boto3.client('lambda')\n data = {\n 'comp_name': \"attachInstanceTags\", \n 'action': \"attach tags\", \n 'level': \"info\", \n 'msg': \"attached \" + str(tags) + \" to instance \" + instance_id\n } \n try:\n client = boto3.client('ec2')\n response = client.create_tags(\n Resources=[instance_id],\n Tags= tags\n )\n print(\"Attached tags to instance\")\n except ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':\n print(\"No such instance exists\")\n empty = True\n else:\n print(\"Error attaching tags to instance: \" + str(e))\n \n if (not empty):\n invoke_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"notify_snitch\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(data)\n )",
"def get_instances_by_tags(self, tags):\n return self.get_only_instances(filters={'tag:{}'.format(key): val for key, val in tags.items()})",
"def create_or_update_tags(self, Tags):\n tag = Tags[0]\n asg_name = tag['ResourceId']\n ec2_tag = {\n 'Key': tag['Key'],\n 'Value': tag['Value']\n }\n try:\n response = self.asg.create_or_update_tags(\n Tags=Tags\n )\n except Exception as e:\n logger.error('Unknown Error: %s', str(e))\n else:\n logger.info(response)\n\n asg_instances = self.get_asg_instance_ids(asg_name)\n return EC2Wrapper(self.session).create_tags(Resources=asg_instances, Tags=[ec2_tag])",
"def AddInstanceTags(self, instance, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def _BindTagToInstance(tag_value, instance):\n messages = rm_tags.TagMessages()\n resource_name = _GetFullCanonicalResourceName(instance)\n\n tag_binding = messages.TagBinding(parent=resource_name, tagValue=tag_value)\n binding_req = messages.CloudresourcemanagerTagBindingsCreateRequest(\n tagBinding=tag_binding\n )\n\n location = _GetInstanceLocation(instance)\n\n with endpoints.CrmEndpointOverrides(location):\n try:\n op = rm_tags.TagBindingsService().Create(binding_req)\n if not op.done:\n operations.WaitForReturnOperation(\n op,\n 'Waiting for TagBinding for parent [{}] and tag value [{}] to be '\n 'created with [{}]'.format(resource_name, tag_value, op.name),\n )\n except Exception as e: # pylint: disable=broad-except\n log.status.Print('Tag binding could not be created: ' + repr(e))",
"def load_instances_tags(instance_id=None):\n loader = TagLoader(override_instance_id=instance_id)\n return loader.load_tags()",
"def ex_create_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'CreateTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def add_tag (self,tag,key):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if tag in self.tag_dict:\r\n\r\n self.tag_dict[tag].add(key)\r\n\r\n else:\r\n\r\n self.tag_dict[tag] = {key}\r\n\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO tags_to_keys \"\r\n +\"(notebook, tag, keyword) \"\r\n +\"VALUES (?,?,?);\",value_tuple)",
"def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value",
"def initialize_tags(self):\n\t\tfor tag_enum in Tags:\n\t\t\ttag = Tag(id=tag_enum.value, description=tag_enum.name)\n\t\t\tself.session.add(tag)\n\t\t\tself.session.commit()",
"def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)",
"def CreateTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'ec2':\n response = Client.create_tags(\n Resources = [\n\t\t ResourceId\n\t\t],\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'efs':\n response = Client.create_tags(\n FileSystemId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'redshift':\n response = Client.create_tags(\n ResourceName = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'workspaces':\n response = Client.create_tags(\n ResourceId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def create_tags(configurationIds=None, tags=None):\n pass",
"def upsert_tags(self, entry, tags):\n if not tags:\n return\n\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for tag in tags:\n logging.info('Processing Tag from Template: %s ...', tag.template)\n\n tag_to_create = tag\n tag_to_update = None\n for persisted_tag in persisted_tags:\n # The column field is not case sensitive.\n if tag.template == persisted_tag.template and \\\n tag.column.lower() == persisted_tag.column.lower():\n\n tag_to_create = None\n tag.name = persisted_tag.name\n if not self.__tag_fields_are_equal(tag, persisted_tag):\n tag_to_update = tag\n break\n\n if tag_to_create:\n created_tag = self.create_tag(entry.name, tag_to_create)\n logging.info('Tag created: %s', created_tag.name)\n elif tag_to_update:\n self.update_tag(tag_to_update)\n logging.info('Tag updated: %s', tag_to_update.name)\n else:\n logging.info('Tag is up-to-date: %s', tag.name)",
"def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def AddTags(resource_id, region, **kwargs):\n if not kwargs:\n return\n\n describe_cmd = SoftLayer_PREFIX + [\n '--format',\n 'json',\n 'vs',\n 'detail',\n '%s' % resource_id]\n\n stdout, _ = IssueRetryableCommand(describe_cmd)\n response = json.loads(stdout)\n tags = response['tags']\n\n tag_cmd = SoftLayer_PREFIX + [\n 'vs',\n 'edit']\n\n if tags is not None:\n for tag in tags:\n tag_cmd = tag_cmd + ['--tag', '{0}'.format(tag)]\n\n for key, value in kwargs.items():\n tag_cmd = tag_cmd + ['--tag', '{0}:{1}'.format(key, value)]\n\n tag_cmd = tag_cmd + ['{0}'.format(resource_id)]\n IssueRetryableCommand(tag_cmd)",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def tag(self, uuid, tags):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.tag(uuid, tags)",
"def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': 
security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances",
"def aws_tags(self, values):\n if not getattr(self, \"tags\", None):\n self.tags = {}\n\n tags = defaultdict(list)\n\n for tag in values:\n tags[tag[\"Key\"]].append(tag[\"Value\"])\n\n self.tags.update(tags)\n self._transform_known_tags()",
"def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})",
"def create_tags(ResourceArn=None, Tags=None):\n pass",
"def tags():",
"def initiate_new_tag (self,tag,key):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n self.tag_dict[tag] = {key}\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO tags_to_keys\"\r\n +\" (notebook, tag, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)",
"def AddClusterTags(self, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT, \"/%s/tags\" % GANETI_RAPI_VERSION,\n query, None)",
"def add_tag(names, tag):\n for name in names:\n b = Box.query.filter_by(name=name).first()\n b.tags.add(tag)\n db.session.commit()",
"def add_tagging(self, task_instance):",
"def create(self, params={}, **options):\n return self.client.post(\"/tags\", params, **options)",
"def create_spot_instances(ec2, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None) -> Iterable[List[Boto2Instance]]:\n def spotRequestNotFound(e):\n return getattr(e, 'error_code', None) == \"InvalidSpotInstanceRequestID.NotFound\"\n\n for attempt in retry_ec2(retry_for=a_long_time,\n retry_while=inconsistencies_detected):\n with attempt:\n requests = ec2.request_spot_instances(\n price, image_id, count=num_instances, **spec)\n\n if tags is not None:\n for requestID in (request.id for request in requests):\n for attempt in retry_ec2(retry_while=spotRequestNotFound):\n with attempt:\n ec2.create_tags([requestID], tags)\n\n num_active, num_other = 0, 0\n # noinspection PyUnboundLocalVariable,PyTypeChecker\n # request_spot_instances's type annotation is wrong\n for batch in wait_spot_requests_active(ec2,\n requests,\n timeout=timeout,\n tentative=tentative):\n instance_ids = []\n for request in batch:\n if request.state == 'active':\n instance_ids.append(request.instance_id)\n num_active += 1\n else:\n logger.info(\n 'Request %s in unexpected state %s.',\n request.id,\n request.state)\n num_other += 1\n if instance_ids:\n # This next line is the reason we batch. It's so we can get multiple instances in\n # a single request.\n yield ec2.get_only_instances(instance_ids)\n if not num_active:\n message = 'None of the spot requests entered the active state'\n if tentative:\n logger.warning(message + '.')\n else:\n raise RuntimeError(message)\n if num_other:\n logger.warning('%i request(s) entered a state other than active.', num_other)",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"def AddTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'es':\n response = Client.add_tags (\n ARN = ResourceId,\n TagList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'emr':\n response = Client.add_tags (\n ResourceId = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'cloudtrail':\n response = Client.add_tags (\n ResourceId = ResourceId,\n TagsList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'sagemaker':\n response = Client.add_tags (\n ResourceArn = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'datapipeline':\n response = Client.add_tags (\n pipelineId = ResourceId,\n tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def create_tags(apps, _):\n type_model = apps.get_model(\"projects\", \"Type\")\n tag_model = apps.get_model(\"projects\", \"Tag\")\n\n # Tags which are a 1:1 migration\n global education_tag\n global covid_tag\n global innovative_tag\n global other_tag\n education_tag = tag_model(\n name=\"Computing Education\",\n description=\"Seeding inclusive computing education for the next generation \"\n \"and all computer-science training\",\n )\n covid_tag = tag_model(\n name=\"COVID\",\n description=\"Related to COVID-19\",\n )\n innovative_tag = tag_model(\n name=\"Innovative Application\", description=\"Applications for domain sciences\"\n )\n other_tag = tag_model(\n name=\"Other\",\n description=\"My project research area doesn’t fit in any of \"\n \"the predefined categories\",\n )\n\n tags = [\n education_tag,\n covid_tag,\n innovative_tag,\n other_tag,\n tag_model(\n name=\"Computer Architecture\",\n description=\"Designing computer systems optimized for high performance, \"\n \"energy efficiency, and scalability\",\n ),\n tag_model(\n name=\"Data Science\",\n description=\"Developing algorithms for managing and analyzing data at scale\",\n ),\n tag_model(\n name=\"Database Systems\",\n description=\"Designing systems for managing and storing data at scale\",\n ),\n tag_model(\n name=\"Human Computer Interaction\",\n description=\"Exploring the interfaces between people and technologies\",\n ),\n tag_model(\n name=\"AI and Machine Learning\",\n description=\"Foundations and applications of computer algorithms making \"\n \"data-centric models, predictions, and decisions\",\n ),\n tag_model(\n name=\"Networking\",\n description=\"Analysis, design, implementation, and use of local, \"\n \"wide-area, and mobile networks that link computers together\",\n ),\n tag_model(\n name=\"Programming Languages\",\n description=\"Devising new and better ways of programming the computers\",\n ),\n tag_model(\n name=\"Robotics\",\n description=\"Design, construction, operation, and use of robots\",\n ),\n tag_model(\n name=\"Scientific and High-Performance Computing\",\n description=\"Scientific discovery at the frontiers of computational \"\n \"performance, intelligence, and scale\",\n ),\n tag_model(\n name=\"Security and Privacy\",\n description=\"Understanding and defending against emerging threats in our \"\n \"increasingly computational world\",\n ),\n tag_model(\n name=\"Software Engineering\",\n description=\"Design, development, testing, and maintenance of \"\n \"software applications\",\n ),\n tag_model(\n name=\"Distributed Systems\",\n description=\"Harness the power of multiple computational units\",\n ),\n tag_model(\n name=\"Operating Systems\",\n description=\"Analysis, design, and implementation of operating systems\",\n ),\n tag_model(\n name=\"Storage Systems\",\n description=\"Capturing, managing, securing, and prioritizing data\",\n ),\n tag_model(\n name=\"Cloud Computing\",\n description=\"Delivering computing services over the Internet to offer \"\n \"faster innovation, flexible resources, and economies of scale\",\n ),\n tag_model(\n name=\"Edge Computing\",\n description=\"Bring applications closer to data sources such as IoT \"\n \"devices or local edge servers\",\n ),\n tag_model(\n name=\"Vision and Graphics\",\n description=\"Creating and analyzing data from the visual world, \"\n \"and visually understanding complex data\",\n ),\n tag_model(\n name=\"Theory of Computation\",\n description=\"Mathematical foundations of computation, including \"\n \"algorithm design, 
complexity and logic\",\n ),\n tag_model(\n name=\"Daypass\",\n description=\"Daypass project\",\n expose=False,\n ),\n ]\n\n tag_model.objects.bulk_create(tags)\n\n if type_model.objects.count() == 0:\n return\n covid_type = type_model.objects.get(name=\"COVID\")\n research_type = type_model.objects.get(name=\"CS Research\")\n education_type = type_model.objects.get(name=\"Education\")\n innovative_type = type_model.objects.get(name=\"Innovative Application\")\n\n # Gather the old tags. We have to remove the type model from the project model\n # to add the projects to the new tag model,\n # So all we do is collect them here, and then move them later.\n global old_covid_projects\n global old_research_projects\n global old_education_projects\n global old_innovative_projects\n old_covid_projects = list(covid_type.project_type.all())\n old_research_projects = list(research_type.project_type.all())\n old_education_projects = list(education_type.project_type.all())\n old_innovative_projects = list(innovative_type.project_type.all())",
"def add_tags(self, image_id, tags):\n\t\tfor tag in tags:\n\t\t\timage_tag = ImageTag(image_id=image_id, tag_id=tag)\n\t\t\tself.session.add(image_tag)\n\n\t\tself.session.commit()",
"def tag(profile, internet_gateway, key, value):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"Resources\"] = [internet_gateway]\n params[\"Tags\"] = [{\"Key\": key, \"Value\": value}]\n return client.create_tags(**params)",
"def add_tags_to_resource(ResourceId=None, Tags=None):\n pass",
"def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def createPatchTag(tags, instance_id, nt_id):\n\n client = boto3.client('ssm')\n response = client.describe_instance_information(\n InstanceInformationFilterList=[\n {\n 'key': 'InstanceIds',\n 'valueSet': [instance_id]\n }\n ]\n )\n patch_tag_value = ''\n platform_name = ''\n if (response['InstanceInformationList']):\n platform_name = response['InstanceInformationList'][0]['PlatformName'] \n if 'Red Hat Enterprise Linux' in platform_name:\n patch_tag_value = 'default-rhel'\n elif 'Windows' in platform_name:\n patch_tag_value = 'default-windows'\n elif 'Ubuntu' in platform_name:\n patch_tag_value = 'default-ubuntu'\n elif 'Centos' in platform_name:\n patch_tag_value = 'default-centos'\n elif 'Amazon Linux 2' in platform_name:\n patch_tag_value = 'default-amazon2'\n elif 'Amazon Linux' in platform_name:\n patch_tag_value = 'default-amazon'\n else:\n print(\"No patch group found for platform\")\n patch_tag_value = 'Not yet populated'\n\n return patch_tag_value",
"def createTag(self, authenticationToken, tag):\r\n pass",
"def register_elb_instances(elbclient, elbname, instance_ids):\r\n Instances = list(map(\r\n lambda x: {'InstanceId': x},\r\n instance_ids\r\n ))\r\n try:\r\n elbclient.register_instances_with_load_balancer(\r\n LoadBalancerName=elbname,\r\n Instances=Instances,\r\n DryRun=True\r\n )\r\n except Exception as ex:\r\n print(ex.message)\r\n return False\r\n return True",
"def pool_into(self, target):\n for taggable in taggables().values():\n for t in taggable.by_user(self.owner).filter(tags=self):\n t.tag(target)",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def AddTagsToResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'rds':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'elasticache':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'ds':\n response = Client.add_tags_to_resource (\n ResourceId = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)",
"def add_tags(ResourceArn=None, Tags=None):\n pass",
"def start(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.start_instances(InstanceIds=[instance_id])\n print(f\"Start instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def tag_resource(resourceArn=None, tags=None):\n pass",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})",
"def create_instances(ec2_resource: ServiceResource,\n image_id: str,\n key_name: str,\n instance_type: str,\n num_instances: int = 1,\n security_group_ids: Optional[List] = None,\n user_data: Optional[Union[str, bytes]] = None,\n block_device_map: Optional[List[Dict]] = None,\n instance_profile_arn: Optional[str] = None,\n placement_az: Optional[str] = None,\n subnet_id: str = None,\n tags: Optional[Dict[str, str]] = None) -> List[dict]:\n logger.info('Creating %s instance(s) ... ', instance_type)\n\n if isinstance(user_data, str):\n user_data = user_data.encode('utf-8')\n\n request = {'ImageId': image_id,\n 'MinCount': num_instances,\n 'MaxCount': num_instances,\n 'KeyName': key_name,\n 'SecurityGroupIds': security_group_ids,\n 'InstanceType': instance_type,\n 'UserData': user_data,\n 'BlockDeviceMappings': block_device_map,\n 'SubnetId': subnet_id}\n\n if instance_profile_arn:\n # We could just retry when we get an error because the ARN doesn't\n # exist, but we might as well wait for it.\n wait_until_instance_profile_arn_exists(instance_profile_arn)\n\n # Add it to the request\n request['IamInstanceProfile'] = {'Arn': instance_profile_arn}\n\n if placement_az:\n request['Placement'] = {'AvailabilityZone': placement_az}\n\n if tags:\n # Tag everything when we make it.\n flat_tags = flatten_tags(tags)\n request['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': flat_tags},\n {'ResourceType': 'volume', 'Tags': flat_tags}]\n\n return ec2_resource.create_instances(**prune(request))",
"def sample_tags(user, name='Main cuisine'):\n return Tag.objects.create(user=user, name=name)",
"async def add_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"insert into tags(tag_name, map_id) \n select ?, (select map_id from maps where map_path=?)\n where not exists\n (select * from tags where tag_name = ? and map_id = (select map_id from maps where map_path=?)) \"\"\"\n select(conn, insert_sql, (tag, map_name, tag, map_name))\n await channel.send(f\"Added tags `{' '.join(tags)}` for map {map_name} if it wasn't set\")",
"def create_tags(tags_path: Path, email: str, password: str, host_url: str):\n with open(tags_path) as f:\n tags_json = json.load(f)\n\n client = client_util.make_client(host_url, email, password)\n\n # Build dictionary of tags as they exist on the server, mapped by slug.\n online_tags_resp = api_get_tags.sync_detailed(client=client)\n if online_tags_resp.status_code != HTTPStatus.OK:\n click.echo(f\"Request to get tags failed with status {online_tags_resp}\")\n exit(1)\n online_tags = {\n online_tag.slug: online_tag for online_tag in online_tags_resp.parsed\n }\n\n # Record slugs of tags that failed.\n failures = set()\n\n for tag in tags_json[\"tags\"]:\n slug = tag[\"slug\"]\n name = tag[\"name\"]\n description = tag[\"description\"]\n color = tag.get(\"color\")\n\n if slug in online_tags:\n # Update\n online_tag = online_tags[slug]\n if (\n name == online_tag.name\n and description == online_tag.description\n and (color is None or color == online_tag.color)\n ):\n click.echo(f\"Tag {slug} is already up to date.\")\n else:\n click.echo(f\"Updating tag {slug}\")\n res = api_update_tag.sync_detailed(\n slug,\n client=client,\n json_body=PutTagsTagJsonBody(\n name,\n description,\n color if color else online_tags[slug].color,\n ),\n )\n if res.status_code != HTTPStatus.OK:\n click.echo(f\"Request failed with content={res.content}\")\n failures.add(slug)\n else:\n # Create\n click.echo(f\"Creating tag {slug}\")\n res = api_create_tag.sync_detailed(\n client=client,\n json_body=PostTagsJsonBody(\n name,\n slug,\n description,\n color=color if color else UNSET,\n ),\n )\n if res.status_code != HTTPStatus.CREATED:\n click.echo(f\"Request failed with content={res.content}\", err=True)\n failures.add(slug)\n\n if failures:\n click.echo(f\"Completed with failures: {failures}\", err=True)\n sys.exit(1)",
"def instance(template, name, ami, type, keypair, interfaces,\n availability_zone=None, user_data=None, placement_group=None, role='unknown', iam_role=None,\n volume_size=None, tags=None):\n i = Instance(name, template=template)\n i.ImageId = ami\n i.InstanceType = type\n i.KeyName = Ref(keypair)\n\n i.Tags = Tags(Name=aws_name(i.title))\n if role:\n i.Tags += Tags(Role=role)\n\n if tags:\n i.Tags += Tags(**tags)\n\n if iam_role:\n if isinstance(iam_role, str):\n i.IamInstanceProfile = iam_role\n else:\n i.DependsOn = iam_role.title\n i.IamInstanceProfile = Ref(iam_role)\n\n if availability_zone:\n i.AvailabilityZone = availability_zone\n\n if placement_group:\n i.PlacementGroupName = Ref(placement_group)\n\n if volume_size:\n i.BlockDeviceMappings = [\n BlockDeviceMapping(DeviceName=\"/dev/sda1\", Ebs=EBSBlockDevice(VolumeSize=volume_size))\n ]\n\n if interfaces:\n i.NetworkInterfaces = [NetworkInterfaceProperty(DeviceIndex=index,\n NetworkInterfaceId=Ref(interface))\n for (index, interface) in enumerate(interfaces)]\n\n if user_data:\n i.UserData = Base64(Join('', [line + '\\n' for line in user_data.splitlines()]))\n\n return i",
"def ex_describe_tags(self, node):\n params = { 'Action': 'DescribeTags',\n 'Filter.0.Name': 'resource-id',\n 'Filter.0.Value.0': node.id,\n 'Filter.1.Name': 'resource-type',\n 'Filter.1.Value.0': 'instance',\n }\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n tags = {}\n for element in self._findall(result, 'tagSet/item'):\n key = self._findtext(element, 'key')\n value = self._findtext(element, 'value')\n\n tags[key] = value\n return tags",
"def GetInstanceTags(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def create_instances_request(nodes, placement_groups=None, exclusive=False):\n assert len(nodes) > 0\n assert len(nodes) <= BULK_INSERT_LIMIT\n # model here indicates any node that can be used to describe the rest\n model = next(iter(nodes))\n partition = lkp.node_partition(model)\n template = lkp.node_template(model)\n region = lkp.node_region(model)\n\n body = NSDict()\n body.count = len(nodes)\n if not exclusive:\n body.minCount = 1\n\n # source of instance properties\n body.sourceInstanceTemplate = template\n\n # overwrites properties accross all instances\n body.instanceProperties = instance_properties(partition, model)\n\n # key is instance name, value overwrites properties\n body.perInstanceProperties = {\n k: per_instance_properties(k, placement_groups) for k in nodes\n }\n\n zones = {\n **{\n f\"zones/{zone}\": {\"preference\": \"ALLOW\"}\n for zone in partition.zone_policy_allow or []\n },\n **{\n f\"zones/{zone}\": {\"preference\": \"DENY\"}\n for zone in partition.zone_policy_deny or []\n },\n }\n if zones:\n body.locationPolicy = {\"locations\": zones}\n\n request = util.compute.regionInstances().bulkInsert(\n project=cfg.project, region=region, body=body.to_dict()\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n f\"new request: endpoint={request.methodId} nodes={to_hostlist(nodes)}\"\n )\n log_api_request(request)\n return request",
"def TagResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'lambda':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n TagName: TagValue\n }\n\t )\n elif self.Service == 'dax':\n response = Client.tag_resource (\n ResourceName = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'directconnect':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'key': TagName,\n\t\t\t'value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'dynamodb':\n response = Client.tag_resource (\n ResourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'kms':\n response = Client.tag_resource (\n KeyId = ResourceId,\n\t\tTags = [\n\t\t {\n 'TagKey': TagName,\n\t\t\t'TagValue': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'apigateway':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\ttags = [\n\t\t {\n\t\t\tTagName: TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'secretsmanager':\n response = Client.tag_resource (\n SecretId = ResourceId,\n\t\tTags = [\n\t\t {\n\t\t\t'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'cloudfront':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n 'Items': [\n\t\t {\n\t\t\t 'Key': TagName,\n\t\t\t 'Value': TagValue\n\t\t }\n ]\n\t\t}\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})",
"def add_tags_to_photo(self, photo_id, tag_list):\n print('\\nHello from add_tags_to_photo, the tag list is: ', tag_list)\n\n # for each tag\n # check if the tag is in the database already\n # if it is not then add it to the tag table\n for tag in tag_list:\n\n # will return None if the tag is not in the tag table\n # tag_name is the column name\n data = self.db.get_row('tag', 'tag_name', tag)\n\n print('data is', data)\n\n if data is None:\n\n print('\\nthat value {} is not in the db\\n'.format(tag))\n\n self.db.make_query(\n '''\n insert into tag (tag_name, user_id, photos)\n values (\"{}\", \"{}\", {})\n '''.format(\n tag,\n '28035310@N00',\n self.get_photo_count_by_tag(tag)\n )\n )\n\n print('\\nshould be added now...\\n')\n\n if self.db.get_row('tag', 'tag_name', tag):\n print('\\nadded tag, ', tag, '\\n')\n\n # UNIQUE constraint can cause problems here\n # so catch any exceptions\n try:\n # The tag is now in the database.\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n except Exception as e:\n print('Problem adding tag to photo_tag ', e)\n\n data = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n tags_in_data = []\n if len(data) > 0:\n for tag in data:\n tags_in_data.append(tag[1])\n\n print(tags_in_data)\n for tag in tag_list:\n if tag not in tags_in_data:\n return False\n else:\n self.update_photo_count(tag)\n\n return True",
"def tags(self, tags: List[Tag]):\n\n self._tags = tags",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)",
"def startstop_instances(module, ecs, instance_ids, state, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n # Fail unless the user defined instance tags\n if not instance_tags:\n module.fail_json(msg='instance_ids should be a list of instances, aborting')\n\n # To make an ECS tag filter, we need to prepend 'tag:' to each key.\n # An empty filter does no filtering, so it's safe to pass it to the\n # get_all_instances method even if the user did not specify instance_tags\n filters = []\n if instance_tags:\n for inst_tag in instance_tags:\n tag = {}\n tag[\"tag:\" + inst_tag['tag_key']] = inst_tag['tag_value']\n filters.append(tag)\n # Check (and eventually change) instances attributes and instances state\n running_instances_array = []\n region, connect_args = get_acs_connection_info(module)\n connect_args['force'] = module.params.get('force', None)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state != state:\n instance_dict_array.append(get_instance_info(inst))\n try:\n if state == 'running':\n inst.start()\n elif state == 'restarted':\n inst.reboot()\n else:\n inst.stop()\n except ECSResponseError as e:\n module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, instance_ids)",
"def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")",
"def add_tagging(self, task_instance):\n dag_run = task_instance.dag_run\n task = task_instance.task\n\n with sentry_sdk.configure_scope() as scope:\n for tag_name in self.SCOPE_TASK_INSTANCE_TAGS:\n attribute = getattr(task_instance, tag_name)\n scope.set_tag(tag_name, attribute)\n for tag_name in self.SCOPE_DAG_RUN_TAGS:\n attribute = getattr(dag_run, tag_name)\n scope.set_tag(tag_name, attribute)\n scope.set_tag(\"operator\", task.__class__.__name__)",
"def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))",
"def register_instances(self, instances):\r\n if isinstance(instances, str) or isinstance(instances, unicode):\r\n instances = [instances]\r\n new_instances = self.connection.register_instances(self.name, instances)\r\n self.instances = new_instances",
"def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))",
"def multi_tag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_username = validation.cast_integer(tag_userid, 'userid')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"about to tag %d images with %d tags\" % (len(image_ids), len(tag_names)))\n\t\tfor id in image_ids:\n\t\t\ttry:\n\t\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t\texcept errors.ValidationError, ex:\n\t\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t\tself.log.debug(\"image %s\" % id)\n\n\t\t# do all inserts in a single transaction\n\t\tdef tag_txn(txn, owner, tagger, ids, tags):\n\t\t\tfor id in ids:\n\t\t\t\tid = validation.cast_integer(id, 'id')\n\t\t\t\tfor tag in tags:\n\t\t\t\t\ttag = tag.lower()\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_insert_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_ids, tag_names)",
"def load_tags(tag_list):\n\n tag_insert = \"INSERT INTO release_tag VALUES\" \\\n \" (?, ?, ?, ?, ?, ?)\"\n dbutils.load_list(tag_insert, tag_list, DATABASE_FILE)",
"def create_tags(self, resource_ids, tags):\r\n params = {}\r\n self.build_list_params(params, resource_ids, 'ResourceId')\r\n self.build_tag_param_list(params, tags)\r\n return self.get_status('CreateTags', params, verb='POST')",
"def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)",
"def add_tag(args):\n\n if not args.nodespec and not args.software_profile and \\\n not args.hardware_profile:\n sys.stderr.write('Error: must specify --nodes'\n '/--software-profile/--hardware-profile\\n')\n sys.stderr.flush()\n sys.exit(1)\n\n session = DbManager().openSession()\n\n try:\n nodes = []\n softwareprofiles = []\n hardwareprofiles = []\n\n if args.nodespec:\n nodespec = args.nodespec.replace('*', '%')\n\n nodes = NodesDbHandler().getNodesByNameFilter(\n session, nodespec)\n\n if not nodes:\n sys.stderr.write(\n 'No nodes matching nodespec [{0}]\\n'.format(\n args.nodespec))\n\n sys.stderr.flush()\n\n sys.exit(1)\n\n if args.software_profile:\n softwareprofile_names = args.software_profile.split(',')\n\n for softwareprofile_name in softwareprofile_names:\n softwareprofile = SoftwareProfilesDbHandler().\\\n getSoftwareProfile(session, softwareprofile_name)\n\n softwareprofiles.append(softwareprofile)\n\n if args.hardware_profile:\n hardwareprofile_names = args.hardware_profile.split(',')\n\n for hardwareprofile_name in hardwareprofile_names:\n hardwareprofile = HardwareProfilesDbHandler().\\\n getHardwareProfile(session, hardwareprofile_name)\n\n hardwareprofiles.append(hardwareprofile)\n\n # Create list of 'Tags' database objects\n tag_objs = get_tag_objects(session, args.tags)\n\n # Associate with nodes\n for node in nodes or []:\n for tag_obj in tag_objs:\n if tag_obj in node.tags:\n # Tag already exists\n continue\n\n node.tags.append(tag_obj)\n\n print(node.name, node.tags)\n\n # Associate with software profiles\n for softwareprofile in softwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in softwareprofile.tags:\n continue\n\n softwareprofile.tags.append(tag_obj)\n\n # Associate with hardware profiles\n for hardwareprofile in hardwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in hardwareprofile.tags:\n continue\n\n hardwareprofile.tags.append(tag_obj)\n\n session.commit()\n finally:\n DbManager().closeSession()",
"def _add_tags_to_housekeeper(self, store: bool, tags: List[str]) -> None:\n for tag in tags:\n if store and self.hk.get_tag(name=tag) is None:\n self.hk.add_commit(self.hk.new_tag(tag))",
"def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):\n tags = []\n client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)\n result = client.get_all_tags(filters={\"resource-id\": instance_id})\n if result:\n for tag in result:\n tags.append({tag.name: tag.value})\n else:\n log.info(\"No tags found for instance_id %s\", instance_id)\n return tags"
] |
[
"0.7487185",
"0.65916544",
"0.6556984",
"0.6501802",
"0.64678895",
"0.6454626",
"0.6356658",
"0.635133",
"0.63240445",
"0.6164839",
"0.6097934",
"0.6037268",
"0.5994",
"0.59635496",
"0.5806253",
"0.57834727",
"0.577199",
"0.5755929",
"0.5738129",
"0.57312036",
"0.57304275",
"0.5722226",
"0.5685364",
"0.56768584",
"0.56648546",
"0.5656488",
"0.56547266",
"0.56522703",
"0.5651411",
"0.564497",
"0.5620931",
"0.56190526",
"0.5610558",
"0.5593968",
"0.5582739",
"0.55700374",
"0.5556906",
"0.5545231",
"0.5544329",
"0.5513783",
"0.55116355",
"0.54854536",
"0.54703885",
"0.54567623",
"0.5437359",
"0.5432589",
"0.54244524",
"0.5413352",
"0.5413352",
"0.5413352",
"0.5373807",
"0.5369346",
"0.53691274",
"0.5359833",
"0.5350921",
"0.5344637",
"0.5342173",
"0.53406054",
"0.5325798",
"0.53149086",
"0.53009903",
"0.529638",
"0.528434",
"0.52839196",
"0.52795345",
"0.5267247",
"0.52540356",
"0.5242548",
"0.5242414",
"0.52416116",
"0.52410626",
"0.5238465",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5238307",
"0.5232474",
"0.5222484",
"0.5210827",
"0.51977515",
"0.51953113",
"0.5185736",
"0.51782656",
"0.5176004",
"0.5166971",
"0.51308566",
"0.5128788",
"0.5125094",
"0.5120859",
"0.5118337",
"0.5117827",
"0.5115758",
"0.51093",
"0.5105582",
"0.5105472"
] |
0.0
|
-1
|
You can create multiple tags and bind them to multiple instances, which allows you to classify and filter instances by tag. A tag consists of a key and a value. Each key must be unique within a region for an Alibaba Cloud account, while different keys can share the same value. If the tag you specify does not exist, it is automatically created and bound to the specified instance. If a tag with the same key is already bound to the instance, the new tag overwrites the existing one. You can bind up to 20 tags to each instance, and you can bind tags to up to 50 instances in each call to this operation.
|
def tag_resources(
self,
request: dds_20151201_models.TagResourcesRequest,
) -> dds_20151201_models.TagResourcesResponse:
runtime = util_models.RuntimeOptions()
return self.tag_resources_with_options(request, runtime)
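A minimal usage sketch of this entry point follows, assuming an already-initialized dds client; the request field names (region_id, resource_type, resource_id, tag) and the nested TagResourcesRequestTag model follow the common Alibaba Cloud TagResources request shape and are assumptions, not taken from this record.

# Hypothetical usage sketch -- `client` is assumed to be an initialized dds client,
# and `dds_20151201_models` is the same models alias used in the code above.
request = dds_20151201_models.TagResourcesRequest(
    region_id='cn-hangzhou',                 # region that holds the instances (assumed field)
    resource_type='INSTANCE',                # tag instances rather than other resource types (assumed field)
    resource_id=['dds-bp1xxxxxxxxxxxxx'],    # up to 50 instance IDs per call
    tag=[
        # Each key-value pair becomes one tag; up to 20 tags per instance.
        dds_20151201_models.TagResourcesRequestTag(key='env', value='prod'),
    ],
)
response = client.tag_resources(request)    # delegates to tag_resources_with_options above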
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')",
"def _BindSecureTagsToInstances(\n network_name, project, tag_mapping_file_name, compute_client\n):\n tag_mapping = _ReadTagMapping(tag_mapping_file_name)\n if not tag_mapping:\n return\n\n vm_instances = _GetInstancesInNetwork(project, network_name, compute_client)\n\n for vm in vm_instances:\n _BindTagsToInstance(tag_mapping, vm)\n _BindServiceTagsToInstance(tag_mapping, vm)",
"def test_can_query_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, get_instances_by_tag_type, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n tag1 = {'fsimcluster': 'testcluster'}\n type = 'f1.2xlarge'\n\n # create an instance with only a single tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags=tag1)\n instances.should.have.length_of(1)\n\n tag2 = { 'secondtag': 'secondvalue' }\n # create an instance with additional tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={**tag1, **tag2})\n instances.shouldnt.be.empty\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)\n\n # get_instances_by_tag_type with both tags should only return one instance\n instances = get_instances_by_tag_type({**tag1, **tag2},type)\n list(instances).should.have.length_of(1)\n\n # and that instance should be the one with both tags\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n operation_params = {\n 'InstanceIds': ids\n }\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.equal({**tag1, **tag2})\n\n # get_instances_by_tag_type with only the original tag should return both instances\n instances = get_instances_by_tag_type(tag1,type)\n list(instances).should.have.length_of(2)",
"def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)",
"def create_tags(resource_id, key, value):\n response = EC2.create_tags(\n Resources=[\n resource_id,\n ],\n Tags=[\n {\n 'Key': key,\n 'Value': value\n },\n ]\n )\n return response",
"def tag_instance_subnet(self, tags):\n self._request({\"instance-subnet-tags\": dict(tags)})",
"def create_tags(tags_list):\n\n Tags.create_multiple(tags_list)",
"def attachInstanceTags(instance_id, tags):\n \n empty = False\n lambda_client = boto3.client('lambda')\n data = {\n 'comp_name': \"attachInstanceTags\", \n 'action': \"attach tags\", \n 'level': \"info\", \n 'msg': \"attached \" + str(tags) + \" to instance \" + instance_id\n } \n try:\n client = boto3.client('ec2')\n response = client.create_tags(\n Resources=[instance_id],\n Tags= tags\n )\n print(\"Attached tags to instance\")\n except ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':\n print(\"No such instance exists\")\n empty = True\n else:\n print(\"Error attaching tags to instance: \" + str(e))\n \n if (not empty):\n invoke_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"notify_snitch\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(data)\n )",
"def get_instances_by_tags(self, tags):\n return self.get_only_instances(filters={'tag:{}'.format(key): val for key, val in tags.items()})",
"def create_or_update_tags(self, Tags):\n tag = Tags[0]\n asg_name = tag['ResourceId']\n ec2_tag = {\n 'Key': tag['Key'],\n 'Value': tag['Value']\n }\n try:\n response = self.asg.create_or_update_tags(\n Tags=Tags\n )\n except Exception as e:\n logger.error('Unknown Error: %s', str(e))\n else:\n logger.info(response)\n\n asg_instances = self.get_asg_instance_ids(asg_name)\n return EC2Wrapper(self.session).create_tags(Resources=asg_instances, Tags=[ec2_tag])",
"def AddInstanceTags(self, instance, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def _BindTagToInstance(tag_value, instance):\n messages = rm_tags.TagMessages()\n resource_name = _GetFullCanonicalResourceName(instance)\n\n tag_binding = messages.TagBinding(parent=resource_name, tagValue=tag_value)\n binding_req = messages.CloudresourcemanagerTagBindingsCreateRequest(\n tagBinding=tag_binding\n )\n\n location = _GetInstanceLocation(instance)\n\n with endpoints.CrmEndpointOverrides(location):\n try:\n op = rm_tags.TagBindingsService().Create(binding_req)\n if not op.done:\n operations.WaitForReturnOperation(\n op,\n 'Waiting for TagBinding for parent [{}] and tag value [{}] to be '\n 'created with [{}]'.format(resource_name, tag_value, op.name),\n )\n except Exception as e: # pylint: disable=broad-except\n log.status.Print('Tag binding could not be created: ' + repr(e))",
"def load_instances_tags(instance_id=None):\n loader = TagLoader(override_instance_id=instance_id)\n return loader.load_tags()",
"def ex_create_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'CreateTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def add_tag (self,tag,key):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if tag in self.tag_dict:\r\n\r\n self.tag_dict[tag].add(key)\r\n\r\n else:\r\n\r\n self.tag_dict[tag] = {key}\r\n\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO tags_to_keys \"\r\n +\"(notebook, tag, keyword) \"\r\n +\"VALUES (?,?,?);\",value_tuple)",
"def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value",
"def initialize_tags(self):\n\t\tfor tag_enum in Tags:\n\t\t\ttag = Tag(id=tag_enum.value, description=tag_enum.name)\n\t\t\tself.session.add(tag)\n\t\t\tself.session.commit()",
"def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)",
"def CreateTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'ec2':\n response = Client.create_tags(\n Resources = [\n\t\t ResourceId\n\t\t],\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'efs':\n response = Client.create_tags(\n FileSystemId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'redshift':\n response = Client.create_tags(\n ResourceName = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'workspaces':\n response = Client.create_tags(\n ResourceId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def create_tags(configurationIds=None, tags=None):\n pass",
"def upsert_tags(self, entry, tags):\n if not tags:\n return\n\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for tag in tags:\n logging.info('Processing Tag from Template: %s ...', tag.template)\n\n tag_to_create = tag\n tag_to_update = None\n for persisted_tag in persisted_tags:\n # The column field is not case sensitive.\n if tag.template == persisted_tag.template and \\\n tag.column.lower() == persisted_tag.column.lower():\n\n tag_to_create = None\n tag.name = persisted_tag.name\n if not self.__tag_fields_are_equal(tag, persisted_tag):\n tag_to_update = tag\n break\n\n if tag_to_create:\n created_tag = self.create_tag(entry.name, tag_to_create)\n logging.info('Tag created: %s', created_tag.name)\n elif tag_to_update:\n self.update_tag(tag_to_update)\n logging.info('Tag updated: %s', tag_to_update.name)\n else:\n logging.info('Tag is up-to-date: %s', tag.name)",
"def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def AddTags(resource_id, region, **kwargs):\n if not kwargs:\n return\n\n describe_cmd = SoftLayer_PREFIX + [\n '--format',\n 'json',\n 'vs',\n 'detail',\n '%s' % resource_id]\n\n stdout, _ = IssueRetryableCommand(describe_cmd)\n response = json.loads(stdout)\n tags = response['tags']\n\n tag_cmd = SoftLayer_PREFIX + [\n 'vs',\n 'edit']\n\n if tags is not None:\n for tag in tags:\n tag_cmd = tag_cmd + ['--tag', '{0}'.format(tag)]\n\n for key, value in kwargs.items():\n tag_cmd = tag_cmd + ['--tag', '{0}:{1}'.format(key, value)]\n\n tag_cmd = tag_cmd + ['{0}'.format(resource_id)]\n IssueRetryableCommand(tag_cmd)",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def tag(self, uuid, tags):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.tag(uuid, tags)",
"def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': 
security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances",
"def aws_tags(self, values):\n if not getattr(self, \"tags\", None):\n self.tags = {}\n\n tags = defaultdict(list)\n\n for tag in values:\n tags[tag[\"Key\"]].append(tag[\"Value\"])\n\n self.tags.update(tags)\n self._transform_known_tags()",
"def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})",
"def create_tags(ResourceArn=None, Tags=None):\n pass",
"def tags():",
"def initiate_new_tag (self,tag,key):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n self.tag_dict[tag] = {key}\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO tags_to_keys\"\r\n +\" (notebook, tag, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)",
"def AddClusterTags(self, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT, \"/%s/tags\" % GANETI_RAPI_VERSION,\n query, None)",
"def add_tag(names, tag):\n for name in names:\n b = Box.query.filter_by(name=name).first()\n b.tags.add(tag)\n db.session.commit()",
"def add_tagging(self, task_instance):",
"def create(self, params={}, **options):\n return self.client.post(\"/tags\", params, **options)",
"def create_spot_instances(ec2, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None) -> Iterable[List[Boto2Instance]]:\n def spotRequestNotFound(e):\n return getattr(e, 'error_code', None) == \"InvalidSpotInstanceRequestID.NotFound\"\n\n for attempt in retry_ec2(retry_for=a_long_time,\n retry_while=inconsistencies_detected):\n with attempt:\n requests = ec2.request_spot_instances(\n price, image_id, count=num_instances, **spec)\n\n if tags is not None:\n for requestID in (request.id for request in requests):\n for attempt in retry_ec2(retry_while=spotRequestNotFound):\n with attempt:\n ec2.create_tags([requestID], tags)\n\n num_active, num_other = 0, 0\n # noinspection PyUnboundLocalVariable,PyTypeChecker\n # request_spot_instances's type annotation is wrong\n for batch in wait_spot_requests_active(ec2,\n requests,\n timeout=timeout,\n tentative=tentative):\n instance_ids = []\n for request in batch:\n if request.state == 'active':\n instance_ids.append(request.instance_id)\n num_active += 1\n else:\n logger.info(\n 'Request %s in unexpected state %s.',\n request.id,\n request.state)\n num_other += 1\n if instance_ids:\n # This next line is the reason we batch. It's so we can get multiple instances in\n # a single request.\n yield ec2.get_only_instances(instance_ids)\n if not num_active:\n message = 'None of the spot requests entered the active state'\n if tentative:\n logger.warning(message + '.')\n else:\n raise RuntimeError(message)\n if num_other:\n logger.warning('%i request(s) entered a state other than active.', num_other)",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"def AddTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'es':\n response = Client.add_tags (\n ARN = ResourceId,\n TagList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'emr':\n response = Client.add_tags (\n ResourceId = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'cloudtrail':\n response = Client.add_tags (\n ResourceId = ResourceId,\n TagsList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'sagemaker':\n response = Client.add_tags (\n ResourceArn = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'datapipeline':\n response = Client.add_tags (\n pipelineId = ResourceId,\n tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def create_tags(apps, _):\n type_model = apps.get_model(\"projects\", \"Type\")\n tag_model = apps.get_model(\"projects\", \"Tag\")\n\n # Tags which are a 1:1 migration\n global education_tag\n global covid_tag\n global innovative_tag\n global other_tag\n education_tag = tag_model(\n name=\"Computing Education\",\n description=\"Seeding inclusive computing education for the next generation \"\n \"and all computer-science training\",\n )\n covid_tag = tag_model(\n name=\"COVID\",\n description=\"Related to COVID-19\",\n )\n innovative_tag = tag_model(\n name=\"Innovative Application\", description=\"Applications for domain sciences\"\n )\n other_tag = tag_model(\n name=\"Other\",\n description=\"My project research area doesn’t fit in any of \"\n \"the predefined categories\",\n )\n\n tags = [\n education_tag,\n covid_tag,\n innovative_tag,\n other_tag,\n tag_model(\n name=\"Computer Architecture\",\n description=\"Designing computer systems optimized for high performance, \"\n \"energy efficiency, and scalability\",\n ),\n tag_model(\n name=\"Data Science\",\n description=\"Developing algorithms for managing and analyzing data at scale\",\n ),\n tag_model(\n name=\"Database Systems\",\n description=\"Designing systems for managing and storing data at scale\",\n ),\n tag_model(\n name=\"Human Computer Interaction\",\n description=\"Exploring the interfaces between people and technologies\",\n ),\n tag_model(\n name=\"AI and Machine Learning\",\n description=\"Foundations and applications of computer algorithms making \"\n \"data-centric models, predictions, and decisions\",\n ),\n tag_model(\n name=\"Networking\",\n description=\"Analysis, design, implementation, and use of local, \"\n \"wide-area, and mobile networks that link computers together\",\n ),\n tag_model(\n name=\"Programming Languages\",\n description=\"Devising new and better ways of programming the computers\",\n ),\n tag_model(\n name=\"Robotics\",\n description=\"Design, construction, operation, and use of robots\",\n ),\n tag_model(\n name=\"Scientific and High-Performance Computing\",\n description=\"Scientific discovery at the frontiers of computational \"\n \"performance, intelligence, and scale\",\n ),\n tag_model(\n name=\"Security and Privacy\",\n description=\"Understanding and defending against emerging threats in our \"\n \"increasingly computational world\",\n ),\n tag_model(\n name=\"Software Engineering\",\n description=\"Design, development, testing, and maintenance of \"\n \"software applications\",\n ),\n tag_model(\n name=\"Distributed Systems\",\n description=\"Harness the power of multiple computational units\",\n ),\n tag_model(\n name=\"Operating Systems\",\n description=\"Analysis, design, and implementation of operating systems\",\n ),\n tag_model(\n name=\"Storage Systems\",\n description=\"Capturing, managing, securing, and prioritizing data\",\n ),\n tag_model(\n name=\"Cloud Computing\",\n description=\"Delivering computing services over the Internet to offer \"\n \"faster innovation, flexible resources, and economies of scale\",\n ),\n tag_model(\n name=\"Edge Computing\",\n description=\"Bring applications closer to data sources such as IoT \"\n \"devices or local edge servers\",\n ),\n tag_model(\n name=\"Vision and Graphics\",\n description=\"Creating and analyzing data from the visual world, \"\n \"and visually understanding complex data\",\n ),\n tag_model(\n name=\"Theory of Computation\",\n description=\"Mathematical foundations of computation, including \"\n \"algorithm design, 
complexity and logic\",\n ),\n tag_model(\n name=\"Daypass\",\n description=\"Daypass project\",\n expose=False,\n ),\n ]\n\n tag_model.objects.bulk_create(tags)\n\n if type_model.objects.count() == 0:\n return\n covid_type = type_model.objects.get(name=\"COVID\")\n research_type = type_model.objects.get(name=\"CS Research\")\n education_type = type_model.objects.get(name=\"Education\")\n innovative_type = type_model.objects.get(name=\"Innovative Application\")\n\n # Gather the old tags. We have to remove the type model from the project model\n # to add the projects to the new tag model,\n # So all we do is collect them here, and then move them later.\n global old_covid_projects\n global old_research_projects\n global old_education_projects\n global old_innovative_projects\n old_covid_projects = list(covid_type.project_type.all())\n old_research_projects = list(research_type.project_type.all())\n old_education_projects = list(education_type.project_type.all())\n old_innovative_projects = list(innovative_type.project_type.all())",
"def add_tags(self, image_id, tags):\n\t\tfor tag in tags:\n\t\t\timage_tag = ImageTag(image_id=image_id, tag_id=tag)\n\t\t\tself.session.add(image_tag)\n\n\t\tself.session.commit()",
"def tag(profile, internet_gateway, key, value):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"Resources\"] = [internet_gateway]\n params[\"Tags\"] = [{\"Key\": key, \"Value\": value}]\n return client.create_tags(**params)",
"def add_tags_to_resource(ResourceId=None, Tags=None):\n pass",
"def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def createPatchTag(tags, instance_id, nt_id):\n\n client = boto3.client('ssm')\n response = client.describe_instance_information(\n InstanceInformationFilterList=[\n {\n 'key': 'InstanceIds',\n 'valueSet': [instance_id]\n }\n ]\n )\n patch_tag_value = ''\n platform_name = ''\n if (response['InstanceInformationList']):\n platform_name = response['InstanceInformationList'][0]['PlatformName'] \n if 'Red Hat Enterprise Linux' in platform_name:\n patch_tag_value = 'default-rhel'\n elif 'Windows' in platform_name:\n patch_tag_value = 'default-windows'\n elif 'Ubuntu' in platform_name:\n patch_tag_value = 'default-ubuntu'\n elif 'Centos' in platform_name:\n patch_tag_value = 'default-centos'\n elif 'Amazon Linux 2' in platform_name:\n patch_tag_value = 'default-amazon2'\n elif 'Amazon Linux' in platform_name:\n patch_tag_value = 'default-amazon'\n else:\n print(\"No patch group found for platform\")\n patch_tag_value = 'Not yet populated'\n\n return patch_tag_value",
"def createTag(self, authenticationToken, tag):\r\n pass",
"def register_elb_instances(elbclient, elbname, instance_ids):\r\n Instances = list(map(\r\n lambda x: {'InstanceId': x},\r\n instance_ids\r\n ))\r\n try:\r\n elbclient.register_instances_with_load_balancer(\r\n LoadBalancerName=elbname,\r\n Instances=Instances,\r\n DryRun=True\r\n )\r\n except Exception as ex:\r\n print(ex.message)\r\n return False\r\n return True",
"def pool_into(self, target):\n for taggable in taggables().values():\n for t in taggable.by_user(self.owner).filter(tags=self):\n t.tag(target)",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def AddTagsToResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'rds':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'elasticache':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'ds':\n response = Client.add_tags_to_resource (\n ResourceId = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)",
"def add_tags(ResourceArn=None, Tags=None):\n pass",
"def start(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.start_instances(InstanceIds=[instance_id])\n print(f\"Start instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def tag_resource(resourceArn=None, tags=None):\n pass",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})",
"def sample_tags(user, name='Main cuisine'):\n return Tag.objects.create(user=user, name=name)",
"def create_instances(ec2_resource: ServiceResource,\n image_id: str,\n key_name: str,\n instance_type: str,\n num_instances: int = 1,\n security_group_ids: Optional[List] = None,\n user_data: Optional[Union[str, bytes]] = None,\n block_device_map: Optional[List[Dict]] = None,\n instance_profile_arn: Optional[str] = None,\n placement_az: Optional[str] = None,\n subnet_id: str = None,\n tags: Optional[Dict[str, str]] = None) -> List[dict]:\n logger.info('Creating %s instance(s) ... ', instance_type)\n\n if isinstance(user_data, str):\n user_data = user_data.encode('utf-8')\n\n request = {'ImageId': image_id,\n 'MinCount': num_instances,\n 'MaxCount': num_instances,\n 'KeyName': key_name,\n 'SecurityGroupIds': security_group_ids,\n 'InstanceType': instance_type,\n 'UserData': user_data,\n 'BlockDeviceMappings': block_device_map,\n 'SubnetId': subnet_id}\n\n if instance_profile_arn:\n # We could just retry when we get an error because the ARN doesn't\n # exist, but we might as well wait for it.\n wait_until_instance_profile_arn_exists(instance_profile_arn)\n\n # Add it to the request\n request['IamInstanceProfile'] = {'Arn': instance_profile_arn}\n\n if placement_az:\n request['Placement'] = {'AvailabilityZone': placement_az}\n\n if tags:\n # Tag everything when we make it.\n flat_tags = flatten_tags(tags)\n request['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': flat_tags},\n {'ResourceType': 'volume', 'Tags': flat_tags}]\n\n return ec2_resource.create_instances(**prune(request))",
"async def add_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"insert into tags(tag_name, map_id) \n select ?, (select map_id from maps where map_path=?)\n where not exists\n (select * from tags where tag_name = ? and map_id = (select map_id from maps where map_path=?)) \"\"\"\n select(conn, insert_sql, (tag, map_name, tag, map_name))\n await channel.send(f\"Added tags `{' '.join(tags)}` for map {map_name} if it wasn't set\")",
"def create_tags(tags_path: Path, email: str, password: str, host_url: str):\n with open(tags_path) as f:\n tags_json = json.load(f)\n\n client = client_util.make_client(host_url, email, password)\n\n # Build dictionary of tags as they exist on the server, mapped by slug.\n online_tags_resp = api_get_tags.sync_detailed(client=client)\n if online_tags_resp.status_code != HTTPStatus.OK:\n click.echo(f\"Request to get tags failed with status {online_tags_resp}\")\n exit(1)\n online_tags = {\n online_tag.slug: online_tag for online_tag in online_tags_resp.parsed\n }\n\n # Record slugs of tags that failed.\n failures = set()\n\n for tag in tags_json[\"tags\"]:\n slug = tag[\"slug\"]\n name = tag[\"name\"]\n description = tag[\"description\"]\n color = tag.get(\"color\")\n\n if slug in online_tags:\n # Update\n online_tag = online_tags[slug]\n if (\n name == online_tag.name\n and description == online_tag.description\n and (color is None or color == online_tag.color)\n ):\n click.echo(f\"Tag {slug} is already up to date.\")\n else:\n click.echo(f\"Updating tag {slug}\")\n res = api_update_tag.sync_detailed(\n slug,\n client=client,\n json_body=PutTagsTagJsonBody(\n name,\n description,\n color if color else online_tags[slug].color,\n ),\n )\n if res.status_code != HTTPStatus.OK:\n click.echo(f\"Request failed with content={res.content}\")\n failures.add(slug)\n else:\n # Create\n click.echo(f\"Creating tag {slug}\")\n res = api_create_tag.sync_detailed(\n client=client,\n json_body=PostTagsJsonBody(\n name,\n slug,\n description,\n color=color if color else UNSET,\n ),\n )\n if res.status_code != HTTPStatus.CREATED:\n click.echo(f\"Request failed with content={res.content}\", err=True)\n failures.add(slug)\n\n if failures:\n click.echo(f\"Completed with failures: {failures}\", err=True)\n sys.exit(1)",
"def instance(template, name, ami, type, keypair, interfaces,\n availability_zone=None, user_data=None, placement_group=None, role='unknown', iam_role=None,\n volume_size=None, tags=None):\n i = Instance(name, template=template)\n i.ImageId = ami\n i.InstanceType = type\n i.KeyName = Ref(keypair)\n\n i.Tags = Tags(Name=aws_name(i.title))\n if role:\n i.Tags += Tags(Role=role)\n\n if tags:\n i.Tags += Tags(**tags)\n\n if iam_role:\n if isinstance(iam_role, str):\n i.IamInstanceProfile = iam_role\n else:\n i.DependsOn = iam_role.title\n i.IamInstanceProfile = Ref(iam_role)\n\n if availability_zone:\n i.AvailabilityZone = availability_zone\n\n if placement_group:\n i.PlacementGroupName = Ref(placement_group)\n\n if volume_size:\n i.BlockDeviceMappings = [\n BlockDeviceMapping(DeviceName=\"/dev/sda1\", Ebs=EBSBlockDevice(VolumeSize=volume_size))\n ]\n\n if interfaces:\n i.NetworkInterfaces = [NetworkInterfaceProperty(DeviceIndex=index,\n NetworkInterfaceId=Ref(interface))\n for (index, interface) in enumerate(interfaces)]\n\n if user_data:\n i.UserData = Base64(Join('', [line + '\\n' for line in user_data.splitlines()]))\n\n return i",
"def ex_describe_tags(self, node):\n params = { 'Action': 'DescribeTags',\n 'Filter.0.Name': 'resource-id',\n 'Filter.0.Value.0': node.id,\n 'Filter.1.Name': 'resource-type',\n 'Filter.1.Value.0': 'instance',\n }\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n tags = {}\n for element in self._findall(result, 'tagSet/item'):\n key = self._findtext(element, 'key')\n value = self._findtext(element, 'value')\n\n tags[key] = value\n return tags",
"def TagResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'lambda':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n TagName: TagValue\n }\n\t )\n elif self.Service == 'dax':\n response = Client.tag_resource (\n ResourceName = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'directconnect':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'key': TagName,\n\t\t\t'value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'dynamodb':\n response = Client.tag_resource (\n ResourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'kms':\n response = Client.tag_resource (\n KeyId = ResourceId,\n\t\tTags = [\n\t\t {\n 'TagKey': TagName,\n\t\t\t'TagValue': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'apigateway':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\ttags = [\n\t\t {\n\t\t\tTagName: TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'secretsmanager':\n response = Client.tag_resource (\n SecretId = ResourceId,\n\t\tTags = [\n\t\t {\n\t\t\t'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'cloudfront':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n 'Items': [\n\t\t {\n\t\t\t 'Key': TagName,\n\t\t\t 'Value': TagValue\n\t\t }\n ]\n\t\t}\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def GetInstanceTags(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def create_instances_request(nodes, placement_groups=None, exclusive=False):\n assert len(nodes) > 0\n assert len(nodes) <= BULK_INSERT_LIMIT\n # model here indicates any node that can be used to describe the rest\n model = next(iter(nodes))\n partition = lkp.node_partition(model)\n template = lkp.node_template(model)\n region = lkp.node_region(model)\n\n body = NSDict()\n body.count = len(nodes)\n if not exclusive:\n body.minCount = 1\n\n # source of instance properties\n body.sourceInstanceTemplate = template\n\n # overwrites properties accross all instances\n body.instanceProperties = instance_properties(partition, model)\n\n # key is instance name, value overwrites properties\n body.perInstanceProperties = {\n k: per_instance_properties(k, placement_groups) for k in nodes\n }\n\n zones = {\n **{\n f\"zones/{zone}\": {\"preference\": \"ALLOW\"}\n for zone in partition.zone_policy_allow or []\n },\n **{\n f\"zones/{zone}\": {\"preference\": \"DENY\"}\n for zone in partition.zone_policy_deny or []\n },\n }\n if zones:\n body.locationPolicy = {\"locations\": zones}\n\n request = util.compute.regionInstances().bulkInsert(\n project=cfg.project, region=region, body=body.to_dict()\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n f\"new request: endpoint={request.methodId} nodes={to_hostlist(nodes)}\"\n )\n log_api_request(request)\n return request",
"def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})",
"def add_tags_to_photo(self, photo_id, tag_list):\n print('\\nHello from add_tags_to_photo, the tag list is: ', tag_list)\n\n # for each tag\n # check if the tag is in the database already\n # if it is not then add it to the tag table\n for tag in tag_list:\n\n # will return None if the tag is not in the tag table\n # tag_name is the column name\n data = self.db.get_row('tag', 'tag_name', tag)\n\n print('data is', data)\n\n if data is None:\n\n print('\\nthat value {} is not in the db\\n'.format(tag))\n\n self.db.make_query(\n '''\n insert into tag (tag_name, user_id, photos)\n values (\"{}\", \"{}\", {})\n '''.format(\n tag,\n '28035310@N00',\n self.get_photo_count_by_tag(tag)\n )\n )\n\n print('\\nshould be added now...\\n')\n\n if self.db.get_row('tag', 'tag_name', tag):\n print('\\nadded tag, ', tag, '\\n')\n\n # UNIQUE constraint can cause problems here\n # so catch any exceptions\n try:\n # The tag is now in the database.\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n except Exception as e:\n print('Problem adding tag to photo_tag ', e)\n\n data = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n tags_in_data = []\n if len(data) > 0:\n for tag in data:\n tags_in_data.append(tag[1])\n\n print(tags_in_data)\n for tag in tag_list:\n if tag not in tags_in_data:\n return False\n else:\n self.update_photo_count(tag)\n\n return True",
"def tags(self, tags: List[Tag]):\n\n self._tags = tags",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)",
"def startstop_instances(module, ecs, instance_ids, state, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n # Fail unless the user defined instance tags\n if not instance_tags:\n module.fail_json(msg='instance_ids should be a list of instances, aborting')\n\n # To make an ECS tag filter, we need to prepend 'tag:' to each key.\n # An empty filter does no filtering, so it's safe to pass it to the\n # get_all_instances method even if the user did not specify instance_tags\n filters = []\n if instance_tags:\n for inst_tag in instance_tags:\n tag = {}\n tag[\"tag:\" + inst_tag['tag_key']] = inst_tag['tag_value']\n filters.append(tag)\n # Check (and eventually change) instances attributes and instances state\n running_instances_array = []\n region, connect_args = get_acs_connection_info(module)\n connect_args['force'] = module.params.get('force', None)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state != state:\n instance_dict_array.append(get_instance_info(inst))\n try:\n if state == 'running':\n inst.start()\n elif state == 'restarted':\n inst.reboot()\n else:\n inst.stop()\n except ECSResponseError as e:\n module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, instance_ids)",
"def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")",
"def add_tagging(self, task_instance):\n dag_run = task_instance.dag_run\n task = task_instance.task\n\n with sentry_sdk.configure_scope() as scope:\n for tag_name in self.SCOPE_TASK_INSTANCE_TAGS:\n attribute = getattr(task_instance, tag_name)\n scope.set_tag(tag_name, attribute)\n for tag_name in self.SCOPE_DAG_RUN_TAGS:\n attribute = getattr(dag_run, tag_name)\n scope.set_tag(tag_name, attribute)\n scope.set_tag(\"operator\", task.__class__.__name__)",
"def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))",
"def register_instances(self, instances):\r\n if isinstance(instances, str) or isinstance(instances, unicode):\r\n instances = [instances]\r\n new_instances = self.connection.register_instances(self.name, instances)\r\n self.instances = new_instances",
"def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))",
"def multi_tag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_username = validation.cast_integer(tag_userid, 'userid')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"about to tag %d images with %d tags\" % (len(image_ids), len(tag_names)))\n\t\tfor id in image_ids:\n\t\t\ttry:\n\t\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t\texcept errors.ValidationError, ex:\n\t\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t\tself.log.debug(\"image %s\" % id)\n\n\t\t# do all inserts in a single transaction\n\t\tdef tag_txn(txn, owner, tagger, ids, tags):\n\t\t\tfor id in ids:\n\t\t\t\tid = validation.cast_integer(id, 'id')\n\t\t\t\tfor tag in tags:\n\t\t\t\t\ttag = tag.lower()\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_insert_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_ids, tag_names)",
"def create_tags(self, resource_ids, tags):\r\n params = {}\r\n self.build_list_params(params, resource_ids, 'ResourceId')\r\n self.build_tag_param_list(params, tags)\r\n return self.get_status('CreateTags', params, verb='POST')",
"def load_tags(tag_list):\n\n tag_insert = \"INSERT INTO release_tag VALUES\" \\\n \" (?, ?, ?, ?, ?, ?)\"\n dbutils.load_list(tag_insert, tag_list, DATABASE_FILE)",
"def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)",
"def add_tag(args):\n\n if not args.nodespec and not args.software_profile and \\\n not args.hardware_profile:\n sys.stderr.write('Error: must specify --nodes'\n '/--software-profile/--hardware-profile\\n')\n sys.stderr.flush()\n sys.exit(1)\n\n session = DbManager().openSession()\n\n try:\n nodes = []\n softwareprofiles = []\n hardwareprofiles = []\n\n if args.nodespec:\n nodespec = args.nodespec.replace('*', '%')\n\n nodes = NodesDbHandler().getNodesByNameFilter(\n session, nodespec)\n\n if not nodes:\n sys.stderr.write(\n 'No nodes matching nodespec [{0}]\\n'.format(\n args.nodespec))\n\n sys.stderr.flush()\n\n sys.exit(1)\n\n if args.software_profile:\n softwareprofile_names = args.software_profile.split(',')\n\n for softwareprofile_name in softwareprofile_names:\n softwareprofile = SoftwareProfilesDbHandler().\\\n getSoftwareProfile(session, softwareprofile_name)\n\n softwareprofiles.append(softwareprofile)\n\n if args.hardware_profile:\n hardwareprofile_names = args.hardware_profile.split(',')\n\n for hardwareprofile_name in hardwareprofile_names:\n hardwareprofile = HardwareProfilesDbHandler().\\\n getHardwareProfile(session, hardwareprofile_name)\n\n hardwareprofiles.append(hardwareprofile)\n\n # Create list of 'Tags' database objects\n tag_objs = get_tag_objects(session, args.tags)\n\n # Associate with nodes\n for node in nodes or []:\n for tag_obj in tag_objs:\n if tag_obj in node.tags:\n # Tag already exists\n continue\n\n node.tags.append(tag_obj)\n\n print(node.name, node.tags)\n\n # Associate with software profiles\n for softwareprofile in softwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in softwareprofile.tags:\n continue\n\n softwareprofile.tags.append(tag_obj)\n\n # Associate with hardware profiles\n for hardwareprofile in hardwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in hardwareprofile.tags:\n continue\n\n hardwareprofile.tags.append(tag_obj)\n\n session.commit()\n finally:\n DbManager().closeSession()",
"def _add_tags_to_housekeeper(self, store: bool, tags: List[str]) -> None:\n for tag in tags:\n if store and self.hk.get_tag(name=tag) is None:\n self.hk.add_commit(self.hk.new_tag(tag))",
"def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):\n tags = []\n client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)\n result = client.get_all_tags(filters={\"resource-id\": instance_id})\n if result:\n for tag in result:\n tags.append({tag.name: tag.value})\n else:\n log.info(\"No tags found for instance_id %s\", instance_id)\n return tags"
] |
[
"0.74873763",
"0.65912956",
"0.65561086",
"0.6501582",
"0.6467801",
"0.6455448",
"0.6356754",
"0.63517284",
"0.63239217",
"0.6164876",
"0.60988724",
"0.60375375",
"0.599445",
"0.5963434",
"0.58057624",
"0.57843035",
"0.57726794",
"0.5756584",
"0.5738389",
"0.57328016",
"0.57313013",
"0.57230544",
"0.5686239",
"0.5677894",
"0.56662965",
"0.56567246",
"0.565524",
"0.56531125",
"0.565146",
"0.56451654",
"0.56209767",
"0.56205976",
"0.5612001",
"0.55944926",
"0.5583458",
"0.5570526",
"0.5557534",
"0.55464417",
"0.55440867",
"0.5515398",
"0.55125785",
"0.548511",
"0.5471506",
"0.5458045",
"0.5438132",
"0.5433872",
"0.5424822",
"0.5414369",
"0.5414369",
"0.5414369",
"0.5374374",
"0.5371427",
"0.53688776",
"0.5359795",
"0.53516203",
"0.5345461",
"0.53426623",
"0.53420615",
"0.5325097",
"0.5316941",
"0.5300474",
"0.52976036",
"0.52849084",
"0.52839524",
"0.528006",
"0.5268271",
"0.5254624",
"0.52427626",
"0.52422947",
"0.52421826",
"0.52406996",
"0.523975",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52392614",
"0.52339345",
"0.5222822",
"0.5211482",
"0.51988596",
"0.5196789",
"0.5185682",
"0.51794845",
"0.5176306",
"0.5167229",
"0.51289487",
"0.51284665",
"0.51248205",
"0.5121572",
"0.5119399",
"0.5118134",
"0.51158065",
"0.5109637",
"0.51061803",
"0.51058537"
] |
0.0
|
-1
|
You can create multiple tags and bind them to multiple instances, which lets you classify and filter instances by tag. A tag consists of a key and a value. Within a region, tag keys must be unique for each Alibaba Cloud account, although different keys can share the same value. If a tag you specify does not exist, it is automatically created and bound to the specified instance. If a tag with the same key is already bound to the instance, the new tag overwrites the existing one. You can bind up to 20 tags to each instance, and you can tag up to 50 instances in each call to this operation.
|
async def tag_resources_async(
self,
request: dds_20151201_models.TagResourcesRequest,
) -> dds_20151201_models.TagResourcesResponse:
runtime = util_models.RuntimeOptions()
return await self.tag_resources_with_options_async(request, runtime)
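
A minimal usage sketch for the snippet above, assuming an already-initialized DDS client object. The import path, the TagResourcesRequest field names (region_id, resource_type, resource_id, tag), the nested TagResourcesRequestTag class, and the region and instance IDs are assumptions made for illustration, following the common Alibaba Cloud generated-SDK pattern; they are not confirmed by this record.

# Usage sketch (assumed names: import path, request fields, and the nested
# TagResourcesRequestTag class follow the usual Alibaba Cloud SDK pattern;
# the region and instance ID below are placeholders).
import asyncio
from alibabacloud_dds20151201 import models as dds_20151201_models

async def bind_demo_tag(client):
    # Build a request that binds one tag to one instance.
    request = dds_20151201_models.TagResourcesRequest(
        region_id='cn-hangzhou',          # hypothetical region
        resource_type='INSTANCE',
        resource_id=['dds-bp1example'],   # hypothetical instance ID
        tag=[dds_20151201_models.TagResourcesRequestTag(key='env', value='test')],
    )
    # tag_resources_async builds default runtime options and delegates to
    # tag_resources_with_options_async, as shown in the snippet above.
    return await client.tag_resources_async(request)

# asyncio.run(bind_demo_tag(client))  # `client` is an assumed, pre-configured DDS client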
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def test_can_create_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n instances = launch_instances('f1.2xlarge', 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={'fsimcluster': 'testcluster', 'secondtag': 'secondvalue'})\n instances.shouldnt.be.empty\n\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n\n operation_params = {\n 'InstanceIds': ids\n }\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.have.key('fsimcluster')\n tags['fsimcluster'].should.equal('testcluster')\n tags.should.have.key('secondtag')\n tags['secondtag'].should.equal('secondvalue')",
"def _BindSecureTagsToInstances(\n network_name, project, tag_mapping_file_name, compute_client\n):\n tag_mapping = _ReadTagMapping(tag_mapping_file_name)\n if not tag_mapping:\n return\n\n vm_instances = _GetInstancesInNetwork(project, network_name, compute_client)\n\n for vm in vm_instances:\n _BindTagsToInstance(tag_mapping, vm)\n _BindServiceTagsToInstance(tag_mapping, vm)",
"def test_can_query_multiple_instance_tags(self):\n\n # local imports of code-under-test ensure moto has mocks\n # registered before any possible calls out to AWS\n from awstools.awstools import launch_instances, run_block_device_dict, get_instances_by_tag_type, farm_security_group_setup\n\n # launch_instances requires vpc setup as done by firesim/scripts/setup_firesim.py\n from awstools.aws_setup import aws_setup\n aws_setup()\n farm_security_group_setup()\n\n tag1 = {'fsimcluster': 'testcluster'}\n type = 'f1.2xlarge'\n\n # create an instance with only a single tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags=tag1)\n instances.should.have.length_of(1)\n\n tag2 = { 'secondtag': 'secondvalue' }\n # create an instance with additional tag\n instances = launch_instances(type, 1,\n instancemarket=\"ondemand\", spotinterruptionbehavior=None, spotmaxprice=None,\n blockdevices=run_block_device_dict(),\n tags={**tag1, **tag2})\n instances.shouldnt.be.empty\n\n # There should be two instances total now, across two reservations\n ec2_client = boto3.client('ec2')\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate()\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n all_reservations.should.have.length_of(2)\n\n [i for r in all_reservations for i in r['Instances']].should.have.length_of(2)\n\n # get_instances_by_tag_type with both tags should only return one instance\n instances = get_instances_by_tag_type({**tag1, **tag2},type)\n list(instances).should.have.length_of(1)\n\n # and that instance should be the one with both tags\n ids = [i.id for i in instances]\n ids.shouldnt.be.empty\n\n operation_params = {\n 'InstanceIds': ids\n }\n\n paginator = ec2_client.get_paginator('describe_instances')\n page_iterator = paginator.paginate(**operation_params)\n\n all_reservations = []\n for page in page_iterator:\n page['ResponseMetadata']['HTTPStatusCode'].should.equal(200)\n all_reservations += page['Reservations']\n\n tags = {t['Key']:t['Value'] for t in all_reservations[0]['Instances'][0]['Tags']}\n tags.should.equal({**tag1, **tag2})\n\n # get_instances_by_tag_type with only the original tag should return both instances\n instances = get_instances_by_tag_type(tag1,type)\n list(instances).should.have.length_of(2)",
"def list_instances_by_tag(tag_key, tag_value):\n instances = EC2_MANAGER.list_instances_by_tag(tag_key, tag_value)\n\n print(str_sep)\n print(\"Listing EC2 instances from [{}] region with tag [{}:{}].\"\n .format(SESSION.region_name, tag_key, tag_value))\n print(\"{:20s}{:15s}{:10s}{}\".format(\"ID\", \"TYPE\", \"STATE\", \"NAME\"))\n print(str_sep)\n\n for reservations in instances['Reservations']:\n for instance in reservations['Instances']:\n name = next((item for item in instance['Tags'] if item[\"Key\"] == \"Name\"),\n {'Key': 'Name', 'Value': 'None'})\n\n print(\"{:20s}{:15s}{:10s}{}\".format(instance['InstanceId'],\n instance['InstanceType'],\n instance['State']['Name'],\n name['Value']))\n\n print(str_sep)",
"def create_tags(resource_id, key, value):\n response = EC2.create_tags(\n Resources=[\n resource_id,\n ],\n Tags=[\n {\n 'Key': key,\n 'Value': value\n },\n ]\n )\n return response",
"def tag_instance_subnet(self, tags):\n self._request({\"instance-subnet-tags\": dict(tags)})",
"def create_tags(tags_list):\n\n Tags.create_multiple(tags_list)",
"def attachInstanceTags(instance_id, tags):\n \n empty = False\n lambda_client = boto3.client('lambda')\n data = {\n 'comp_name': \"attachInstanceTags\", \n 'action': \"attach tags\", \n 'level': \"info\", \n 'msg': \"attached \" + str(tags) + \" to instance \" + instance_id\n } \n try:\n client = boto3.client('ec2')\n response = client.create_tags(\n Resources=[instance_id],\n Tags= tags\n )\n print(\"Attached tags to instance\")\n except ClientError as e:\n if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':\n print(\"No such instance exists\")\n empty = True\n else:\n print(\"Error attaching tags to instance: \" + str(e))\n \n if (not empty):\n invoke_response = lambda_client.invoke(\n FunctionName= os.environ.get(\"notify_snitch\"),\n InvocationType= \"RequestResponse\",\n Payload= json.dumps(data)\n )",
"def get_instances_by_tags(self, tags):\n return self.get_only_instances(filters={'tag:{}'.format(key): val for key, val in tags.items()})",
"def create_or_update_tags(self, Tags):\n tag = Tags[0]\n asg_name = tag['ResourceId']\n ec2_tag = {\n 'Key': tag['Key'],\n 'Value': tag['Value']\n }\n try:\n response = self.asg.create_or_update_tags(\n Tags=Tags\n )\n except Exception as e:\n logger.error('Unknown Error: %s', str(e))\n else:\n logger.info(response)\n\n asg_instances = self.get_asg_instance_ids(asg_name)\n return EC2Wrapper(self.session).create_tags(Resources=asg_instances, Tags=[ec2_tag])",
"def AddInstanceTags(self, instance, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def _aws_get_instance_by_tag(region, name, tag, raw):\n client = boto3.session.Session().client('ec2', region)\n matching_reservations = client.describe_instances(Filters=[{'Name': tag, 'Values': [name]}]).get('Reservations', [])\n instances = []\n [[instances.append(_aws_instance_from_dict(region, instance, raw)) # pylint: disable=expression-not-assigned\n for instance in reservation.get('Instances')] for reservation in matching_reservations if reservation]\n return instances",
"def _BindTagToInstance(tag_value, instance):\n messages = rm_tags.TagMessages()\n resource_name = _GetFullCanonicalResourceName(instance)\n\n tag_binding = messages.TagBinding(parent=resource_name, tagValue=tag_value)\n binding_req = messages.CloudresourcemanagerTagBindingsCreateRequest(\n tagBinding=tag_binding\n )\n\n location = _GetInstanceLocation(instance)\n\n with endpoints.CrmEndpointOverrides(location):\n try:\n op = rm_tags.TagBindingsService().Create(binding_req)\n if not op.done:\n operations.WaitForReturnOperation(\n op,\n 'Waiting for TagBinding for parent [{}] and tag value [{}] to be '\n 'created with [{}]'.format(resource_name, tag_value, op.name),\n )\n except Exception as e: # pylint: disable=broad-except\n log.status.Print('Tag binding could not be created: ' + repr(e))",
"def load_instances_tags(instance_id=None):\n loader = TagLoader(override_instance_id=instance_id)\n return loader.load_tags()",
"def ex_create_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'CreateTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def add_tag (self,tag,key):\r\n\r\n #with shelf\r\n\r\n if self.using_shelf:\r\n\r\n if tag in self.tag_dict:\r\n\r\n self.tag_dict[tag].add(key)\r\n\r\n else:\r\n\r\n self.tag_dict[tag] = {key}\r\n\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key,)\r\n db_cursor.execute(\"INSERT OR REPLACE \"\r\n +\"INTO tags_to_keys \"\r\n +\"(notebook, tag, keyword) \"\r\n +\"VALUES (?,?,?);\",value_tuple)",
"def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value",
"def initialize_tags(self):\n\t\tfor tag_enum in Tags:\n\t\t\ttag = Tag(id=tag_enum.value, description=tag_enum.name)\n\t\t\tself.session.add(tag)\n\t\t\tself.session.commit()",
"def create_tag(self, session, tags):\n self._tag(session.put, tags=tags, session=session)",
"def CreateTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'ec2':\n response = Client.create_tags(\n Resources = [\n\t\t ResourceId\n\t\t],\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'efs':\n response = Client.create_tags(\n FileSystemId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'redshift':\n response = Client.create_tags(\n ResourceName = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n elif self.Service == 'workspaces':\n response = Client.create_tags(\n ResourceId = ResourceId,\n\t\tTags = [\n {\n\t\t 'Key': TagName,\n\t\t 'Value': TagValue\n }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def create_tags(configurationIds=None, tags=None):\n pass",
"def upsert_tags(self, entry, tags):\n if not tags:\n return\n\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for tag in tags:\n logging.info('Processing Tag from Template: %s ...', tag.template)\n\n tag_to_create = tag\n tag_to_update = None\n for persisted_tag in persisted_tags:\n # The column field is not case sensitive.\n if tag.template == persisted_tag.template and \\\n tag.column.lower() == persisted_tag.column.lower():\n\n tag_to_create = None\n tag.name = persisted_tag.name\n if not self.__tag_fields_are_equal(tag, persisted_tag):\n tag_to_update = tag\n break\n\n if tag_to_create:\n created_tag = self.create_tag(entry.name, tag_to_create)\n logging.info('Tag created: %s', created_tag.name)\n elif tag_to_update:\n self.update_tag(tag_to_update)\n logging.info('Tag updated: %s', tag_to_update.name)\n else:\n logging.info('Tag is up-to-date: %s', tag.name)",
"def _create_tag_request():\n\n key = helpers.get('Tag.1.Key')\n value = helpers.get('Tag.1.Value')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'createTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key,\n 'tags[0].value': value\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def AddTags(resource_id, region, **kwargs):\n if not kwargs:\n return\n\n describe_cmd = SoftLayer_PREFIX + [\n '--format',\n 'json',\n 'vs',\n 'detail',\n '%s' % resource_id]\n\n stdout, _ = IssueRetryableCommand(describe_cmd)\n response = json.loads(stdout)\n tags = response['tags']\n\n tag_cmd = SoftLayer_PREFIX + [\n 'vs',\n 'edit']\n\n if tags is not None:\n for tag in tags:\n tag_cmd = tag_cmd + ['--tag', '{0}'.format(tag)]\n\n for key, value in kwargs.items():\n tag_cmd = tag_cmd + ['--tag', '{0}:{1}'.format(key, value)]\n\n tag_cmd = tag_cmd + ['{0}'.format(resource_id)]\n IssueRetryableCommand(tag_cmd)",
"def _get_instances(instance_tags, region):\n return ec2_conn[region].get_all_instances(filters={\"tag:Name\": instance_tags})",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def tag(self, uuid, tags):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.tag(uuid, tags)",
"def create_instances(region_name, app_name, image_name,\n storage_enckey=None,\n s3_logs_bucket=None,\n identities_url=None,\n ssh_key_name=None,\n company_domain=None,\n ldap_host=None,\n instance_type=None,\n security_group_ids=None,\n instance_profile_arn=None,\n subnet_type=SUBNET_PRIVATE,\n subnet_id=None,\n vpc_id=None,\n vpc_cidr=None,\n tag_prefix=None,\n dry_run=False,\n template_name=None,\n ec2_client=None,\n **kwargs):\n if not instance_type:\n instance_type = 't3a.micro'\n if not template_name:\n template_name = \"%s-cloud-init-script.j2\" % app_name\n if not ec2_client:\n ec2_client = boto3.client('ec2', region_name=region_name)\n resp = ec2_client.describe_instances(\n Filters=[\n {'Name': 'tag:Name', 'Values': [\"*%s*\" % app_name]},\n {'Name': 'instance-state-name',\n 'Values': [EC2_RUNNING, EC2_STOPPED, EC2_PENDING]}])\n\n instances = None\n instance_ids = []\n stopped_instance_ids = []\n for reserv in resp['Reservations']:\n instances = reserv['Instances']\n for instance in reserv['Instances']:\n names = []\n for tag in instance['Tags']:\n if tag['Key'] == 'Name':\n names = [name.strip() for name in tag['Value'].split(',')]\n break\n if app_name not in names:\n continue\n instance_ids += [instance['InstanceId']]\n if instance['State']['Name'] == EC2_STOPPED:\n stopped_instance_ids += [instance['InstanceId']]\n if stopped_instance_ids:\n ec2_client.start_instances(\n InstanceIds=stopped_instance_ids,\n DryRun=dry_run)\n LOGGER.info(\"%s restarted instances %s for '%s'\",\n tag_prefix, stopped_instance_ids, app_name)\n if instance_ids:\n LOGGER.info(\"%s found instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n # If instances are running and there is a message queue,\n # we assume the infrastructure for this app is ready to accept\n # containers.\n return instances\n\n search_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), 'templates')\n template_loader = jinja2.FileSystemLoader(searchpath=search_path)\n template_env = jinja2.Environment(loader=template_loader)\n template = template_env.get_template(template_name)\n user_data = template.render(\n logs_storage_location=\"s3://%s\" % s3_logs_bucket,\n identities_url=identities_url,\n remote_drop_repo=\"https://github.com/djaodjin/drop.git\",\n company_domain=company_domain,\n ldap_host=ldap_host,\n **kwargs)\n\n # Find the ImageId\n image_id = _get_image_id(\n image_name, instance_profile_arn=instance_profile_arn,\n ec2_client=ec2_client, region_name=region_name)\n\n if not storage_enckey:\n # Always make sure the EBS storage is encrypted.\n storage_enckey = _get_or_create_storage_enckey(\n region_name, tag_prefix, dry_run=dry_run)\n\n block_devices = [\n {\n # `DeviceName` is required and must match expected name otherwise\n # an extra disk is created.\n 'DeviceName': '/dev/xvda', # XXX '/dev/sda1',\n #'VirtualName': 'string',\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html\n 'Ebs': {\n 'DeleteOnTermination': False,\n #'Iops': 100, # 'not supported for gp2'\n #'SnapshotId': 'string',\n 'VolumeSize': 8,\n 'VolumeType': 'gp2'\n },\n #'NoDevice': 'string'\n },\n ]\n if storage_enckey:\n # XXX Haven't been able to use the key we created but the default\n # aws/ebs is OK...\n for block_device in block_devices:\n block_device['Ebs'].update({\n 'KmsKeyId': storage_enckey,\n 'Encrypted': True\n })\n\n network_interfaces = [{\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': 
security_group_ids\n }]\n if not subnet_id:\n if not vpc_id:\n vpc_id, _ = _get_vpc_id(tag_prefix, ec2_client=ec2_client,\n region_name=region_name)\n web_subnet_cidrs, dbs_subnet_cidrs, app_subnet_cidrs = _split_cidrs(\n vpc_cidr, ec2_client=ec2_client, region_name=region_name)\n if subnet_type == SUBNET_PRIVATE:\n app_subnet_by_cidrs = _get_subnet_by_cidrs(\n app_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(app_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type == SUBNET_DBS:\n dbs_subnet_by_cidrs = _get_subnet_by_cidrs(\n dbs_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(dbs_subnet_by_cidrs.values()))['SubnetId']\n elif subnet_type in [SUBNET_PUBLIC_READY, SUBNET_PUBLIC]:\n web_subnet_by_cidrs = _get_subnet_by_cidrs(\n web_subnet_cidrs, tag_prefix,\n vpc_id=vpc_id, ec2_client=ec2_client)\n # Use first valid subnet.\n subnet_id = next(iter(web_subnet_by_cidrs.values()))['SubnetId']\n if subnet_type == SUBNET_PUBLIC:\n network_interfaces = [{\n 'AssociatePublicIpAddress': True,\n 'DeviceIndex': 0,\n 'SubnetId': subnet_id,\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n 'Groups': security_group_ids\n }]\n\n if not instances or not instance_ids:\n for _ in range(0, NB_RETRIES):\n # The IAM instance profile take some time to be visible.\n try:\n # Cannot use `SecurityGroups` with `SubnetId`\n # but can use `SecurityGroupIds`.\n resp = ec2_client.run_instances(\n BlockDeviceMappings=block_devices,\n ImageId=image_id,\n KeyName=ssh_key_name,\n InstanceType=instance_type,\n MinCount=1,\n MaxCount=1,\n#botocore.exceptions.ClientError: An error occurred (InvalidParameterCombination) when calling the RunInstances operation: Network interfaces and an instance-level subnet ID may not be specified on the same request\n# SubnetId=subnet_id,\n# SecurityGroupIds=security_group_ids,\n IamInstanceProfile={'Arn': instance_profile_arn},\n NetworkInterfaces=network_interfaces,\n TagSpecifications=[{\n 'ResourceType': \"instance\",\n 'Tags': [{\n 'Key': 'Name',\n 'Value': app_name\n }, {\n 'Key': 'Prefix',\n 'Value': tag_prefix\n }]\n }],\n UserData=user_data,\n DryRun=dry_run)\n instances = resp['Instances']\n instance_ids = [\n instance['InstanceId'] for instance in instances]\n break\n except botocore.exceptions.ClientError as err:\n if not err.response.get('Error', {}).get(\n 'Code', 'Unknown') == 'InvalidParameterValue':\n raise\n LOGGER.info(\"%s waiting for IAM instance profile %s to be\"\\\n \" operational ...\", tag_prefix, instance_profile_arn)\n time.sleep(RETRY_WAIT_DELAY)\n LOGGER.info(\"%s started instances %s for '%s'\",\n tag_prefix, instance_ids, app_name)\n for _ in range(0, NB_RETRIES):\n # It can take some time before the instances will appear\n # in a `describe_instances` call. We want to make sure\n # not to get errors later on if we execute too fast.\n try:\n resp = ec2_client.describe_instances(InstanceIds=instance_ids)\n break\n except botocore.exceptions.ClientError as err:\n err_code = err.response.get('Error', {}).get('Code', 'Unknown')\n LOGGER.error(\"XXX err_code=%s\", err_code)\n if not err_code == 'InvalidInstanceID.NotFound':\n raise\n LOGGER.info(\"%s waiting for EC2 instances %s to be\"\\\n \" operational ...\", tag_prefix, instance_ids)\n time.sleep(RETRY_WAIT_DELAY)\n\n return instances",
"def aws_tags(self, values):\n if not getattr(self, \"tags\", None):\n self.tags = {}\n\n tags = defaultdict(list)\n\n for tag in values:\n tags[tag[\"Key\"]].append(tag[\"Value\"])\n\n self.tags.update(tags)\n self._transform_known_tags()",
"def tag_instance_security_group(self, tags):\n self._request({\"instance-security-group-tags\": dict(tags)})",
"def create_tags(ResourceArn=None, Tags=None):\n pass",
"def tags():",
"def initiate_new_tag (self,tag,key):\r\n\r\n #with shelf\r\n if self.using_shelf:\r\n self.tag_dict[tag] = {key}\r\n #with database\r\n\r\n if self.using_database:\r\n\r\n value_tuple = (notebookname, tag, key)\r\n db_cursor.execute(\"INSERT OR REPLACE\"\r\n +\" INTO tags_to_keys\"\r\n +\" (notebook, tag, keyword)\"\r\n +\" VALUES (?,?,?);\",\r\n value_tuple)",
"def AddClusterTags(self, tags, dry_run=False, reason=None):\n query = [(\"tag\", t) for t in tags]\n _AppendDryRunIf(query, dry_run)\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT, \"/%s/tags\" % GANETI_RAPI_VERSION,\n query, None)",
"def add_tag(names, tag):\n for name in names:\n b = Box.query.filter_by(name=name).first()\n b.tags.add(tag)\n db.session.commit()",
"def add_tagging(self, task_instance):",
"def create(self, params={}, **options):\n return self.client.post(\"/tags\", params, **options)",
"def create_spot_instances(ec2, price, image_id, spec, num_instances=1, timeout=None, tentative=False, tags=None) -> Iterable[List[Boto2Instance]]:\n def spotRequestNotFound(e):\n return getattr(e, 'error_code', None) == \"InvalidSpotInstanceRequestID.NotFound\"\n\n for attempt in retry_ec2(retry_for=a_long_time,\n retry_while=inconsistencies_detected):\n with attempt:\n requests = ec2.request_spot_instances(\n price, image_id, count=num_instances, **spec)\n\n if tags is not None:\n for requestID in (request.id for request in requests):\n for attempt in retry_ec2(retry_while=spotRequestNotFound):\n with attempt:\n ec2.create_tags([requestID], tags)\n\n num_active, num_other = 0, 0\n # noinspection PyUnboundLocalVariable,PyTypeChecker\n # request_spot_instances's type annotation is wrong\n for batch in wait_spot_requests_active(ec2,\n requests,\n timeout=timeout,\n tentative=tentative):\n instance_ids = []\n for request in batch:\n if request.state == 'active':\n instance_ids.append(request.instance_id)\n num_active += 1\n else:\n logger.info(\n 'Request %s in unexpected state %s.',\n request.id,\n request.state)\n num_other += 1\n if instance_ids:\n # This next line is the reason we batch. It's so we can get multiple instances in\n # a single request.\n yield ec2.get_only_instances(instance_ids)\n if not num_active:\n message = 'None of the spot requests entered the active state'\n if tentative:\n logger.warning(message + '.')\n else:\n raise RuntimeError(message)\n if num_other:\n logger.warning('%i request(s) entered a state other than active.', num_other)",
"async def szuru_tag(self, ctx: commands.Context, postid: int, operation: str, *tags):\n raise NotImplementedError(f\"Work in progress!\") # TODO",
"def AddTags(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'es':\n response = Client.add_tags (\n ARN = ResourceId,\n TagList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'emr':\n response = Client.add_tags (\n ResourceId = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'cloudtrail':\n response = Client.add_tags (\n ResourceId = ResourceId,\n TagsList = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'sagemaker':\n response = Client.add_tags (\n ResourceArn = ResourceId,\n Tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n elif self.Service == 'datapipeline':\n response = Client.add_tags (\n pipelineId = ResourceId,\n tags = [\n\t\t {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n ]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def find_instances(\n instance_id=None,\n name=None,\n tags=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n return_objs=False,\n in_states=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n try:\n filter_parameters = {\"filters\": {}}\n\n if instance_id:\n filter_parameters[\"instance_ids\"] = [instance_id]\n\n if name:\n filter_parameters[\"filters\"][\"tag:Name\"] = name\n\n if tags:\n for tag_name, tag_value in tags.items():\n filter_parameters[\"filters\"][\"tag:{}\".format(tag_name)] = tag_value\n\n if filters:\n filter_parameters[\"filters\"].update(filters)\n\n reservations = conn.get_all_reservations(**filter_parameters)\n instances = [i for r in reservations for i in r.instances]\n log.debug(\n \"The filters criteria %s matched the following instances:%s\",\n filter_parameters,\n instances,\n )\n\n if in_states:\n instances = [i for i in instances if i.state in in_states]\n log.debug(\n \"Limiting instance matches to those in the requested states: %s\",\n instances,\n )\n if instances:\n if return_objs:\n return instances\n return [instance.id for instance in instances]\n else:\n return []\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return []",
"def create_tags(apps, _):\n type_model = apps.get_model(\"projects\", \"Type\")\n tag_model = apps.get_model(\"projects\", \"Tag\")\n\n # Tags which are a 1:1 migration\n global education_tag\n global covid_tag\n global innovative_tag\n global other_tag\n education_tag = tag_model(\n name=\"Computing Education\",\n description=\"Seeding inclusive computing education for the next generation \"\n \"and all computer-science training\",\n )\n covid_tag = tag_model(\n name=\"COVID\",\n description=\"Related to COVID-19\",\n )\n innovative_tag = tag_model(\n name=\"Innovative Application\", description=\"Applications for domain sciences\"\n )\n other_tag = tag_model(\n name=\"Other\",\n description=\"My project research area doesn’t fit in any of \"\n \"the predefined categories\",\n )\n\n tags = [\n education_tag,\n covid_tag,\n innovative_tag,\n other_tag,\n tag_model(\n name=\"Computer Architecture\",\n description=\"Designing computer systems optimized for high performance, \"\n \"energy efficiency, and scalability\",\n ),\n tag_model(\n name=\"Data Science\",\n description=\"Developing algorithms for managing and analyzing data at scale\",\n ),\n tag_model(\n name=\"Database Systems\",\n description=\"Designing systems for managing and storing data at scale\",\n ),\n tag_model(\n name=\"Human Computer Interaction\",\n description=\"Exploring the interfaces between people and technologies\",\n ),\n tag_model(\n name=\"AI and Machine Learning\",\n description=\"Foundations and applications of computer algorithms making \"\n \"data-centric models, predictions, and decisions\",\n ),\n tag_model(\n name=\"Networking\",\n description=\"Analysis, design, implementation, and use of local, \"\n \"wide-area, and mobile networks that link computers together\",\n ),\n tag_model(\n name=\"Programming Languages\",\n description=\"Devising new and better ways of programming the computers\",\n ),\n tag_model(\n name=\"Robotics\",\n description=\"Design, construction, operation, and use of robots\",\n ),\n tag_model(\n name=\"Scientific and High-Performance Computing\",\n description=\"Scientific discovery at the frontiers of computational \"\n \"performance, intelligence, and scale\",\n ),\n tag_model(\n name=\"Security and Privacy\",\n description=\"Understanding and defending against emerging threats in our \"\n \"increasingly computational world\",\n ),\n tag_model(\n name=\"Software Engineering\",\n description=\"Design, development, testing, and maintenance of \"\n \"software applications\",\n ),\n tag_model(\n name=\"Distributed Systems\",\n description=\"Harness the power of multiple computational units\",\n ),\n tag_model(\n name=\"Operating Systems\",\n description=\"Analysis, design, and implementation of operating systems\",\n ),\n tag_model(\n name=\"Storage Systems\",\n description=\"Capturing, managing, securing, and prioritizing data\",\n ),\n tag_model(\n name=\"Cloud Computing\",\n description=\"Delivering computing services over the Internet to offer \"\n \"faster innovation, flexible resources, and economies of scale\",\n ),\n tag_model(\n name=\"Edge Computing\",\n description=\"Bring applications closer to data sources such as IoT \"\n \"devices or local edge servers\",\n ),\n tag_model(\n name=\"Vision and Graphics\",\n description=\"Creating and analyzing data from the visual world, \"\n \"and visually understanding complex data\",\n ),\n tag_model(\n name=\"Theory of Computation\",\n description=\"Mathematical foundations of computation, including \"\n \"algorithm design, 
complexity and logic\",\n ),\n tag_model(\n name=\"Daypass\",\n description=\"Daypass project\",\n expose=False,\n ),\n ]\n\n tag_model.objects.bulk_create(tags)\n\n if type_model.objects.count() == 0:\n return\n covid_type = type_model.objects.get(name=\"COVID\")\n research_type = type_model.objects.get(name=\"CS Research\")\n education_type = type_model.objects.get(name=\"Education\")\n innovative_type = type_model.objects.get(name=\"Innovative Application\")\n\n # Gather the old tags. We have to remove the type model from the project model\n # to add the projects to the new tag model,\n # So all we do is collect them here, and then move them later.\n global old_covid_projects\n global old_research_projects\n global old_education_projects\n global old_innovative_projects\n old_covid_projects = list(covid_type.project_type.all())\n old_research_projects = list(research_type.project_type.all())\n old_education_projects = list(education_type.project_type.all())\n old_innovative_projects = list(innovative_type.project_type.all())",
"def add_tags(self, image_id, tags):\n\t\tfor tag in tags:\n\t\t\timage_tag = ImageTag(image_id=image_id, tag_id=tag)\n\t\t\tself.session.add(image_tag)\n\n\t\tself.session.commit()",
"def tag(profile, internet_gateway, key, value):\n client = boto3client.get(\"ec2\", profile)\n params = {}\n params[\"Resources\"] = [internet_gateway]\n params[\"Tags\"] = [{\"Key\": key, \"Value\": value}]\n return client.create_tags(**params)",
"def add_tags_to_resource(ResourceId=None, Tags=None):\n pass",
"def tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_))",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def createPatchTag(tags, instance_id, nt_id):\n\n client = boto3.client('ssm')\n response = client.describe_instance_information(\n InstanceInformationFilterList=[\n {\n 'key': 'InstanceIds',\n 'valueSet': [instance_id]\n }\n ]\n )\n patch_tag_value = ''\n platform_name = ''\n if (response['InstanceInformationList']):\n platform_name = response['InstanceInformationList'][0]['PlatformName'] \n if 'Red Hat Enterprise Linux' in platform_name:\n patch_tag_value = 'default-rhel'\n elif 'Windows' in platform_name:\n patch_tag_value = 'default-windows'\n elif 'Ubuntu' in platform_name:\n patch_tag_value = 'default-ubuntu'\n elif 'Centos' in platform_name:\n patch_tag_value = 'default-centos'\n elif 'Amazon Linux 2' in platform_name:\n patch_tag_value = 'default-amazon2'\n elif 'Amazon Linux' in platform_name:\n patch_tag_value = 'default-amazon'\n else:\n print(\"No patch group found for platform\")\n patch_tag_value = 'Not yet populated'\n\n return patch_tag_value",
"def createTag(self, authenticationToken, tag):\r\n pass",
"def register_elb_instances(elbclient, elbname, instance_ids):\r\n Instances = list(map(\r\n lambda x: {'InstanceId': x},\r\n instance_ids\r\n ))\r\n try:\r\n elbclient.register_instances_with_load_balancer(\r\n LoadBalancerName=elbname,\r\n Instances=Instances,\r\n DryRun=True\r\n )\r\n except Exception as ex:\r\n print(ex.message)\r\n return False\r\n return True",
"def pool_into(self, target):\n for taggable in taggables().values():\n for t in taggable.by_user(self.owner).filter(tags=self):\n t.tag(target)",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def AddTagsToResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'rds':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'elasticache':\n response = Client.add_tags_to_resource (\n ResourceName = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'ds':\n response = Client.add_tags_to_resource (\n ResourceId = ResourceId,\n Tags = [\n {\n 'Key': TagName,\n 'Value': TagValue\n\t\t }\n\t\t]\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)",
"def add_tags(ResourceArn=None, Tags=None):\n pass",
"def start(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.start_instances(InstanceIds=[instance_id])\n print(f\"Start instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def tag_resource(resourceArn=None, tags=None):\n pass",
"def get_instances(instance_ids):\n\n instances = dict()\n conn = connect_to_region(REGION, aws_access_key_id=KEY_ID, aws_secret_access_key=ACCESS_KEY)\n try:\n reservations = conn.get_all_instances(instance_ids)\n except EC2ResponseError, ex:\n print 'Got exception when calling EC2 for instances (%s): %s' % \\\n (\", \".join(instance_ids), ex.error_message)\n return instances\n\n for r in reservations:\n if len(r.instances) and r.instances[0].id in instance_ids:\n instances[r.instances[0].id] = r.instances[0].tags[\"Name\"]\n\n return instances",
"def test_add_or_update_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'value1'}\n ],\n })\n p.run()\n\n # verify that the a new tag is added without modifying existing tags\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'value1', 'testtag': 'testvalue'})",
"def sample_tags(user, name='Main cuisine'):\n return Tag.objects.create(user=user, name=name)",
"def create_instances(ec2_resource: ServiceResource,\n image_id: str,\n key_name: str,\n instance_type: str,\n num_instances: int = 1,\n security_group_ids: Optional[List] = None,\n user_data: Optional[Union[str, bytes]] = None,\n block_device_map: Optional[List[Dict]] = None,\n instance_profile_arn: Optional[str] = None,\n placement_az: Optional[str] = None,\n subnet_id: str = None,\n tags: Optional[Dict[str, str]] = None) -> List[dict]:\n logger.info('Creating %s instance(s) ... ', instance_type)\n\n if isinstance(user_data, str):\n user_data = user_data.encode('utf-8')\n\n request = {'ImageId': image_id,\n 'MinCount': num_instances,\n 'MaxCount': num_instances,\n 'KeyName': key_name,\n 'SecurityGroupIds': security_group_ids,\n 'InstanceType': instance_type,\n 'UserData': user_data,\n 'BlockDeviceMappings': block_device_map,\n 'SubnetId': subnet_id}\n\n if instance_profile_arn:\n # We could just retry when we get an error because the ARN doesn't\n # exist, but we might as well wait for it.\n wait_until_instance_profile_arn_exists(instance_profile_arn)\n\n # Add it to the request\n request['IamInstanceProfile'] = {'Arn': instance_profile_arn}\n\n if placement_az:\n request['Placement'] = {'AvailabilityZone': placement_az}\n\n if tags:\n # Tag everything when we make it.\n flat_tags = flatten_tags(tags)\n request['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': flat_tags},\n {'ResourceType': 'volume', 'Tags': flat_tags}]\n\n return ec2_resource.create_instances(**prune(request))",
"async def add_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"insert into tags(tag_name, map_id) \n select ?, (select map_id from maps where map_path=?)\n where not exists\n (select * from tags where tag_name = ? and map_id = (select map_id from maps where map_path=?)) \"\"\"\n select(conn, insert_sql, (tag, map_name, tag, map_name))\n await channel.send(f\"Added tags `{' '.join(tags)}` for map {map_name} if it wasn't set\")",
"def create_tags(tags_path: Path, email: str, password: str, host_url: str):\n with open(tags_path) as f:\n tags_json = json.load(f)\n\n client = client_util.make_client(host_url, email, password)\n\n # Build dictionary of tags as they exist on the server, mapped by slug.\n online_tags_resp = api_get_tags.sync_detailed(client=client)\n if online_tags_resp.status_code != HTTPStatus.OK:\n click.echo(f\"Request to get tags failed with status {online_tags_resp}\")\n exit(1)\n online_tags = {\n online_tag.slug: online_tag for online_tag in online_tags_resp.parsed\n }\n\n # Record slugs of tags that failed.\n failures = set()\n\n for tag in tags_json[\"tags\"]:\n slug = tag[\"slug\"]\n name = tag[\"name\"]\n description = tag[\"description\"]\n color = tag.get(\"color\")\n\n if slug in online_tags:\n # Update\n online_tag = online_tags[slug]\n if (\n name == online_tag.name\n and description == online_tag.description\n and (color is None or color == online_tag.color)\n ):\n click.echo(f\"Tag {slug} is already up to date.\")\n else:\n click.echo(f\"Updating tag {slug}\")\n res = api_update_tag.sync_detailed(\n slug,\n client=client,\n json_body=PutTagsTagJsonBody(\n name,\n description,\n color if color else online_tags[slug].color,\n ),\n )\n if res.status_code != HTTPStatus.OK:\n click.echo(f\"Request failed with content={res.content}\")\n failures.add(slug)\n else:\n # Create\n click.echo(f\"Creating tag {slug}\")\n res = api_create_tag.sync_detailed(\n client=client,\n json_body=PostTagsJsonBody(\n name,\n slug,\n description,\n color=color if color else UNSET,\n ),\n )\n if res.status_code != HTTPStatus.CREATED:\n click.echo(f\"Request failed with content={res.content}\", err=True)\n failures.add(slug)\n\n if failures:\n click.echo(f\"Completed with failures: {failures}\", err=True)\n sys.exit(1)",
"def instance(template, name, ami, type, keypair, interfaces,\n availability_zone=None, user_data=None, placement_group=None, role='unknown', iam_role=None,\n volume_size=None, tags=None):\n i = Instance(name, template=template)\n i.ImageId = ami\n i.InstanceType = type\n i.KeyName = Ref(keypair)\n\n i.Tags = Tags(Name=aws_name(i.title))\n if role:\n i.Tags += Tags(Role=role)\n\n if tags:\n i.Tags += Tags(**tags)\n\n if iam_role:\n if isinstance(iam_role, str):\n i.IamInstanceProfile = iam_role\n else:\n i.DependsOn = iam_role.title\n i.IamInstanceProfile = Ref(iam_role)\n\n if availability_zone:\n i.AvailabilityZone = availability_zone\n\n if placement_group:\n i.PlacementGroupName = Ref(placement_group)\n\n if volume_size:\n i.BlockDeviceMappings = [\n BlockDeviceMapping(DeviceName=\"/dev/sda1\", Ebs=EBSBlockDevice(VolumeSize=volume_size))\n ]\n\n if interfaces:\n i.NetworkInterfaces = [NetworkInterfaceProperty(DeviceIndex=index,\n NetworkInterfaceId=Ref(interface))\n for (index, interface) in enumerate(interfaces)]\n\n if user_data:\n i.UserData = Base64(Join('', [line + '\\n' for line in user_data.splitlines()]))\n\n return i",
"def ex_describe_tags(self, node):\n params = { 'Action': 'DescribeTags',\n 'Filter.0.Name': 'resource-id',\n 'Filter.0.Value.0': node.id,\n 'Filter.1.Name': 'resource-type',\n 'Filter.1.Value.0': 'instance',\n }\n\n result = self.connection.request(self.path,\n params=params.copy()).object\n\n tags = {}\n for element in self._findall(result, 'tagSet/item'):\n key = self._findtext(element, 'key')\n value = self._findtext(element, 'value')\n\n tags[key] = value\n return tags",
"def TagResource(self, ResourceId, TagName, TagValue):\n\n Client = boto3.client(self.Service)\n\n if self.Service == 'lambda':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n TagName: TagValue\n }\n\t )\n elif self.Service == 'dax':\n response = Client.tag_resource (\n ResourceName = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'directconnect':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'key': TagName,\n\t\t\t'value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'dynamodb':\n response = Client.tag_resource (\n ResourceArn = ResourceId,\n\t\tTags = [\n\t\t {\n 'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'kms':\n response = Client.tag_resource (\n KeyId = ResourceId,\n\t\tTags = [\n\t\t {\n 'TagKey': TagName,\n\t\t\t'TagValue': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'apigateway':\n response = Client.tag_resource (\n resourceArn = ResourceId,\n\t\ttags = [\n\t\t {\n\t\t\tTagName: TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'secretsmanager':\n response = Client.tag_resource (\n SecretId = ResourceId,\n\t\tTags = [\n\t\t {\n\t\t\t'Key': TagName,\n\t\t\t'Value': TagValue\n\t\t }\n\t\t]\n\t )\n elif self.Service == 'cloudfront':\n response = Client.tag_resource (\n Resource = ResourceId,\n\t\tTags = {\n 'Items': [\n\t\t {\n\t\t\t 'Key': TagName,\n\t\t\t 'Value': TagValue\n\t\t }\n ]\n\t\t}\n\t )\n else:\n raise TagNotSupportedError(str(self.Service))\n\n return True",
"def GetInstanceTags(self, instance, reason=None):\n query = []\n _AppendReason(query, reason)\n return self._SendRequest(HTTP_GET,\n (\"/%s/instances/%s/tags\" %\n (GANETI_RAPI_VERSION, instance)), query, None)",
"def create_instances_request(nodes, placement_groups=None, exclusive=False):\n assert len(nodes) > 0\n assert len(nodes) <= BULK_INSERT_LIMIT\n # model here indicates any node that can be used to describe the rest\n model = next(iter(nodes))\n partition = lkp.node_partition(model)\n template = lkp.node_template(model)\n region = lkp.node_region(model)\n\n body = NSDict()\n body.count = len(nodes)\n if not exclusive:\n body.minCount = 1\n\n # source of instance properties\n body.sourceInstanceTemplate = template\n\n # overwrites properties accross all instances\n body.instanceProperties = instance_properties(partition, model)\n\n # key is instance name, value overwrites properties\n body.perInstanceProperties = {\n k: per_instance_properties(k, placement_groups) for k in nodes\n }\n\n zones = {\n **{\n f\"zones/{zone}\": {\"preference\": \"ALLOW\"}\n for zone in partition.zone_policy_allow or []\n },\n **{\n f\"zones/{zone}\": {\"preference\": \"DENY\"}\n for zone in partition.zone_policy_deny or []\n },\n }\n if zones:\n body.locationPolicy = {\"locations\": zones}\n\n request = util.compute.regionInstances().bulkInsert(\n project=cfg.project, region=region, body=body.to_dict()\n )\n\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n f\"new request: endpoint={request.methodId} nodes={to_hostlist(nodes)}\"\n )\n log_api_request(request)\n return request",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tags(self, tags):\n\n self._tags = tags",
"def tag_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['tag'], model=model)",
"def test_add_or_update_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'unmodified', 'pre-existing-2': 'unmodified'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'tag1': 'value1', 'pre-existing-1': 'modified'}}\n ],\n })\n p.run()\n\n # verify modified tags\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'tag1': 'value1', 'pre-existing-1': 'modified', 'pre-existing-2': 'unmodified'})",
"def add_tags_to_photo(self, photo_id, tag_list):\n print('\\nHello from add_tags_to_photo, the tag list is: ', tag_list)\n\n # for each tag\n # check if the tag is in the database already\n # if it is not then add it to the tag table\n for tag in tag_list:\n\n # will return None if the tag is not in the tag table\n # tag_name is the column name\n data = self.db.get_row('tag', 'tag_name', tag)\n\n print('data is', data)\n\n if data is None:\n\n print('\\nthat value {} is not in the db\\n'.format(tag))\n\n self.db.make_query(\n '''\n insert into tag (tag_name, user_id, photos)\n values (\"{}\", \"{}\", {})\n '''.format(\n tag,\n '28035310@N00',\n self.get_photo_count_by_tag(tag)\n )\n )\n\n print('\\nshould be added now...\\n')\n\n if self.db.get_row('tag', 'tag_name', tag):\n print('\\nadded tag, ', tag, '\\n')\n\n # UNIQUE constraint can cause problems here\n # so catch any exceptions\n try:\n # The tag is now in the database.\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n except Exception as e:\n print('Problem adding tag to photo_tag ', e)\n\n data = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n tags_in_data = []\n if len(data) > 0:\n for tag in data:\n tags_in_data.append(tag[1])\n\n print(tags_in_data)\n for tag in tag_list:\n if tag not in tags_in_data:\n return False\n else:\n self.update_photo_count(tag)\n\n return True",
"def tags(self, tags: List[Tag]):\n\n self._tags = tags",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def sample_tag(user, name='Service Tag'):\n return Tag.objects.create(user=user, name=name)",
"def startstop_instances(module, ecs, instance_ids, state, instance_tags):\n\n changed = False\n instance_dict_array = []\n\n if not isinstance(instance_ids, list) or len(instance_ids) < 1:\n # Fail unless the user defined instance tags\n if not instance_tags:\n module.fail_json(msg='instance_ids should be a list of instances, aborting')\n\n # To make an ECS tag filter, we need to prepend 'tag:' to each key.\n # An empty filter does no filtering, so it's safe to pass it to the\n # get_all_instances method even if the user did not specify instance_tags\n filters = []\n if instance_tags:\n for inst_tag in instance_tags:\n tag = {}\n tag[\"tag:\" + inst_tag['tag_key']] = inst_tag['tag_value']\n filters.append(tag)\n # Check (and eventually change) instances attributes and instances state\n running_instances_array = []\n region, connect_args = get_acs_connection_info(module)\n connect_args['force'] = module.params.get('force', None)\n for inst in ecs.get_all_instances(instance_ids=instance_ids, filters=filters):\n if inst.state != state:\n instance_dict_array.append(get_instance_info(inst))\n try:\n if state == 'running':\n inst.start()\n elif state == 'restarted':\n inst.reboot()\n else:\n inst.stop()\n except ECSResponseError as e:\n module.fail_json(msg='Unable to change state for instance {0}, error: {1}'.format(inst.id, e))\n changed = True\n\n return (changed, instance_dict_array, instance_ids)",
"def create(self, name, tag):\n\n\t\turl_json = urllib.urlencode({\"name\": name, \"tag\": tag})\n\t\treturn self._create(\"/tag?json_hash=%s\" % url_json, \"tag\")",
"def add_tagging(self, task_instance):\n dag_run = task_instance.dag_run\n task = task_instance.task\n\n with sentry_sdk.configure_scope() as scope:\n for tag_name in self.SCOPE_TASK_INSTANCE_TAGS:\n attribute = getattr(task_instance, tag_name)\n scope.set_tag(tag_name, attribute)\n for tag_name in self.SCOPE_DAG_RUN_TAGS:\n attribute = getattr(dag_run, tag_name)\n scope.set_tag(tag_name, attribute)\n scope.set_tag(\"operator\", task.__class__.__name__)",
"def simplified_tags(self) -> Dict:\n return dict(self.client.get_instances_id_tags(self.id_, params={'simplify': True}))",
"def register_instances(self, instances):\r\n if isinstance(instances, str) or isinstance(instances, unicode):\r\n instances = [instances]\r\n new_instances = self.connection.register_instances(self.name, instances)\r\n self.instances = new_instances",
"def create_instance(StackId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, AvailabilityZone=None, VirtualizationType=None, SubnetId=None, Architecture=None, RootDeviceType=None, BlockDeviceMappings=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None, Tenancy=None):\n pass",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))",
"def multi_tag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\ttry:\n\t\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\t\ttag_username = validation.cast_integer(tag_userid, 'userid')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\n\t\tself.log.debug(\"about to tag %d images with %d tags\" % (len(image_ids), len(tag_names)))\n\t\tfor id in image_ids:\n\t\t\ttry:\n\t\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t\texcept errors.ValidationError, ex:\n\t\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t\tself.log.debug(\"image %s\" % id)\n\n\t\t# do all inserts in a single transaction\n\t\tdef tag_txn(txn, owner, tagger, ids, tags):\n\t\t\tfor id in ids:\n\t\t\t\tid = validation.cast_integer(id, 'id')\n\t\t\t\tfor tag in tags:\n\t\t\t\t\ttag = tag.lower()\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_insert_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\t\treturn self.app.db.runInteraction(tag_txn, owner_userid, tag_userid, image_ids, tag_names)",
"def load_tags(tag_list):\n\n tag_insert = \"INSERT INTO release_tag VALUES\" \\\n \" (?, ?, ?, ?, ?, ?)\"\n dbutils.load_list(tag_insert, tag_list, DATABASE_FILE)",
"def create_tags(self, resource_ids, tags):\r\n params = {}\r\n self.build_list_params(params, resource_ids, 'ResourceId')\r\n self.build_tag_param_list(params, tags)\r\n return self.get_status('CreateTags', params, verb='POST')",
"def spin_ec2(self):\n #message = event['message']\n init_script = \"\"\"#!/bin/bash\necho \"sleep 50\" >> /etc/rc.local\necho \"shutdown -H +5 >> /etc/rc.local\"\nsleep 50\nshutdown -H +5\"\"\"\n\n print ('Running script:')\n print (init_script)\n\n instance = EC2.run_instances(\n ImageId=AMI,\n InstanceType=INSTANCE_TYPE,\n MinCount=1, # required by boto, even though it's kinda obvious.\n MaxCount=1,\n InstanceInitiatedShutdownBehavior='stop', # make shutdown in script terminate ec2\n UserData=init_script # file to run on instance init.\n \n )\n\n print (\"New instance created.\")\n instance_id = instance['Instances'][0]['InstanceId']\n print (instance_id)\n print (instance)\n EC2.create_tags(Resources=[instance_id], Tags=[{\"Key\" : \"Name\", 'Value': 'test01',},],)",
"def add_tag(args):\n\n if not args.nodespec and not args.software_profile and \\\n not args.hardware_profile:\n sys.stderr.write('Error: must specify --nodes'\n '/--software-profile/--hardware-profile\\n')\n sys.stderr.flush()\n sys.exit(1)\n\n session = DbManager().openSession()\n\n try:\n nodes = []\n softwareprofiles = []\n hardwareprofiles = []\n\n if args.nodespec:\n nodespec = args.nodespec.replace('*', '%')\n\n nodes = NodesDbHandler().getNodesByNameFilter(\n session, nodespec)\n\n if not nodes:\n sys.stderr.write(\n 'No nodes matching nodespec [{0}]\\n'.format(\n args.nodespec))\n\n sys.stderr.flush()\n\n sys.exit(1)\n\n if args.software_profile:\n softwareprofile_names = args.software_profile.split(',')\n\n for softwareprofile_name in softwareprofile_names:\n softwareprofile = SoftwareProfilesDbHandler().\\\n getSoftwareProfile(session, softwareprofile_name)\n\n softwareprofiles.append(softwareprofile)\n\n if args.hardware_profile:\n hardwareprofile_names = args.hardware_profile.split(',')\n\n for hardwareprofile_name in hardwareprofile_names:\n hardwareprofile = HardwareProfilesDbHandler().\\\n getHardwareProfile(session, hardwareprofile_name)\n\n hardwareprofiles.append(hardwareprofile)\n\n # Create list of 'Tags' database objects\n tag_objs = get_tag_objects(session, args.tags)\n\n # Associate with nodes\n for node in nodes or []:\n for tag_obj in tag_objs:\n if tag_obj in node.tags:\n # Tag already exists\n continue\n\n node.tags.append(tag_obj)\n\n print(node.name, node.tags)\n\n # Associate with software profiles\n for softwareprofile in softwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in softwareprofile.tags:\n continue\n\n softwareprofile.tags.append(tag_obj)\n\n # Associate with hardware profiles\n for hardwareprofile in hardwareprofiles:\n for tag_obj in tag_objs:\n if tag_obj in hardwareprofile.tags:\n continue\n\n hardwareprofile.tags.append(tag_obj)\n\n session.commit()\n finally:\n DbManager().closeSession()",
"def _add_tags_to_housekeeper(self, store: bool, tags: List[str]) -> None:\n for tag in tags:\n if store and self.hk.get_tag(name=tag) is None:\n self.hk.add_commit(self.hk.new_tag(tag))",
"def get_tags(instance_id=None, keyid=None, key=None, profile=None, region=None):\n tags = []\n client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)\n result = client.get_all_tags(filters={\"resource-id\": instance_id})\n if result:\n for tag in result:\n tags.append({tag.name: tag.value})\n else:\n log.info(\"No tags found for instance_id %s\", instance_id)\n return tags"
] |
[
"0.74874234",
"0.6591031",
"0.6557189",
"0.6501294",
"0.64681226",
"0.645559",
"0.635655",
"0.6351105",
"0.6322822",
"0.6165512",
"0.6098559",
"0.60358423",
"0.59951353",
"0.59632707",
"0.5805972",
"0.57836425",
"0.5773342",
"0.5757457",
"0.5739055",
"0.5732122",
"0.5731473",
"0.57230246",
"0.5686502",
"0.5678016",
"0.5665878",
"0.56566536",
"0.5655553",
"0.5653486",
"0.56504405",
"0.56465375",
"0.5620883",
"0.56203717",
"0.56130797",
"0.55952966",
"0.5582817",
"0.557048",
"0.55579036",
"0.5545642",
"0.5543099",
"0.5515388",
"0.55129284",
"0.54856485",
"0.54711705",
"0.54575187",
"0.54388624",
"0.5434117",
"0.5425989",
"0.54149795",
"0.54149795",
"0.54149795",
"0.53736216",
"0.53712475",
"0.5368482",
"0.5361285",
"0.535148",
"0.53460485",
"0.53421456",
"0.53420824",
"0.53253186",
"0.53171515",
"0.530041",
"0.5297807",
"0.5285237",
"0.5282793",
"0.5280127",
"0.5268325",
"0.52542937",
"0.5243604",
"0.5243508",
"0.52415377",
"0.52399796",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.52398574",
"0.5239586",
"0.52340883",
"0.52231896",
"0.521173",
"0.5199103",
"0.51967865",
"0.51846576",
"0.5179411",
"0.5176077",
"0.51680386",
"0.51289827",
"0.51278645",
"0.512515",
"0.5121516",
"0.5118757",
"0.5118448",
"0.5116154",
"0.51096326",
"0.5106551",
"0.51063836"
] |
0.0
|
-1
|
> You can remove up to 20 tags at a time. If you remove a tag from all instances, the tag is automatically deleted.
|
def untag_resources_with_options(
self,
request: dds_20151201_models.UntagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.UntagResourcesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.all):
query['All'] = request.all
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.region_id):
query['RegionId'] = request.region_id
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag_key):
query['TagKey'] = request.tag_key
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UntagResources',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.UntagResourcesResponse(),
self.call_api(params, req, runtime)
)
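# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the generated SDK).
# The region, instance ID, and tag keys below are hypothetical placeholders,
# and the keyword-argument constructor is assumed from the request fields the
# method above reads (region_id, resource_type, resource_id, tag_key).
# ---------------------------------------------------------------------------
def untag_instance_example(client) -> None:
    request = dds_20151201_models.UntagResourcesRequest(
        region_id='cn-hangzhou',               # hypothetical region
        resource_type='INSTANCE',              # tags are bound to instances
        resource_id=['dds-bp1xxxxxxxxxxxxx'],  # hypothetical instance ID(s)
        tag_key=['env', 'owner'],              # up to 20 tag keys per call
    )
    runtime = util_models.RuntimeOptions()
    response = client.untag_resources_with_options(request, runtime)
    print(response.body)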
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_tag(tag):\n tag.destroy()",
"def RemoveTags(obj):\n tags = obj.GetTags() # Get tags\n for t in tags: # Iterate through tags\n t.Remove() # Remove tag",
"async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))",
"def remove_tag(args):",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)",
"def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []",
"def cli(env, dry_run):\n\n tag_manager = TagManager(env.client)\n empty_tags = tag_manager.get_unattached_tags()\n\n for tag in empty_tags:\n if dry_run:\n click.secho(f\"(Dry Run) Removing {tag.get('name')}\", fg='yellow')\n else:\n result = tag_manager.delete_tag(tag.get('name'))\n color = 'green' if result else 'red'\n click.secho(f\"Removing {tag.get('name')}\", fg=color)",
"def delete_taggit_tags(apps, schema_editor):\n TaggitTag = apps.get_model('taggit', 'Tag')\n TaggitTag.objects.all().delete()",
"def delTags(self):\r\n for tag in self.tags:\r\n self.canvasCirkt.delete(tag)\r\n self.canvasCirkt.update()",
"def __delete__(self, instance):\r\n self._set_instance_tag_cache(instance, '')",
"def clean(self):\n tags = self.get_tags()\n for tag in tags:\n image_name = self.build_image_name(tag)\n try:\n self.client.images.remove(image_name, force=True)\n except Exception as ex:\n print('Cannot remove {}: {}'.format(tag, str(ex)))",
"def destroyContainer(tag): #@NoSelf",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_taggit_taggeditems(apps, schema_editor):\n TaggitTaggedItem = apps.get_model('taggit', 'TaggedItem')\n TaggitTaggedItem.objects.all().delete()",
"def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})",
"def delete_tags(configurationIds=None, tags=None):\n pass",
"def delete_all(self):\n for tag in self._segments['APP1'].get_tag_list():\n try:\n self.__delattr__(tag)\n except AttributeError:\n warnings.warn(\"could not delete tag \" + tag, RuntimeWarning)",
"def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def delete_tags(self, entry, tags, tag_template_name):\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for persisted_tag in persisted_tags:\n logging.info('Processing Tag from Template: %s ...',\n persisted_tag.template)\n tag_to_delete = None\n\n if tag_template_name in persisted_tag.template:\n tag_to_delete = persisted_tag\n for tag in tags:\n if tag.template == persisted_tag.template and \\\n tag.column == persisted_tag.column:\n tag_to_delete = None\n break\n\n if tag_to_delete:\n self.delete_tag(tag_to_delete)\n logging.info('Tag deleted: %s', tag_to_delete.name)\n else:\n logging.info('Tag is up-to-date: %s', persisted_tag.name)",
"async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())",
"def remove_tags(self, tags):\n for tag in tags:\n self.remove_tag(tag)\n\n return self",
"def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def tags_remove(self, item, tags):\n self._createTagAction(item, \"tags_remove\", tags)",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def remove_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags - set(tags)\n return cp",
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def remove_many_descriptors(self, uuids):",
"def remove_tag(self, tag):\n for task in self._tasks:\n task.remove_tag(tag)\n\n return self",
"def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))",
"def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]",
"async def delete_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"delete from tags where \n map_id in (select map_id from maps where map_path=?) and \n tag_name=? \"\"\"\n select(conn, insert_sql, (map_name, tag))\n await channel.send(f\"Removed tags `{' '.join(tags)}` from map {map_name}\")",
"def remove_tags_from_resource(ResourceId=None, TagKeys=None):\n pass",
"def delete_bucket_tagging(Bucket=None):\n pass",
"def destroy_machines_by_tag(self, tag_name):\n for droplet in self.manager.get_all_droplets(tag_name=tag_name):\n eprint(\"Destroying %s\" % droplet.name)\n droplet.destroy()",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def tags():",
"def remove():",
"def remove(self):",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def test_remove_tag_from_derived_metric(self):\n pass",
"def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])",
"def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))",
"def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def stop_untagged_instances(untagged_instance_ids, temporary_user):\n try:\n logger.info('Stopping the untagged instances : ')\n logger.info(untagged_instance_ids)\n temporary_user.stop_instances(InstanceIds=untagged_instance_ids)\n except Exception as error:\n logger.info('The instances failed to stop with the following error : {}'.format(error))",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)",
"def remove_tag(self, index):\n\n model_index = self.GetItemData(index)\n self.DeleteItem(model_index)\n del self._clientData[model_index]",
"def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)",
"def remove_tag(self, tag):\n cp = self.copy()\n cp.tags.remove(tag)\n return cp",
"def untagAll(self, authenticationToken, guid):\r\n pass",
"def tags_clear(self, item, tags):\n self._createTagAction(item, \"tags_clear\", tags)",
"def delete_asg_tags(asg_name, key):\n logger.info('Deleting tag from asg key: {}...'.format(key))\n if not app_config['DRY_RUN']:\n response = client.delete_tags(\n Tags=[\n {\n 'Key': key,\n 'ResourceId': asg_name,\n 'ResourceType': 'auto-scaling-group'\n },\n ]\n )\n if response['ResponseMetadata']['HTTPStatusCode'] != requests.codes.ok:\n logger.info('AWS asg tag modification operation did not succeed. Exiting.')\n raise Exception('AWS asg tag modification operation did not succeed. Exiting.')\n else:\n logger.info('Skipping asg tag modification due to dry run flag set')\n response = {'message': 'dry run only'}\n return response",
"def remove_tags(self, tags):\n for task in self._tasks:\n task.remove_tags(tags)\n\n return self",
"def remove():\n pass",
"async def remove(self, ctx, name: str):\n if self.config.hexists(\"config:tags:global\", name):\n if not checks.sudo_check(ctx.message):\n await ctx.send(\"Only {} can remove global tags.\".format(self.bot.owner))\n else:\n self.config.hdel(\"config:tags:global\", name)\n\n elif self.config.hexists(\"chan:{}:tags\".format(ctx.message.channel.id), name):\n self.config.hdel(\"chan:{}:tags\".format(ctx.message.channel.id), name)\n # Don't allow ability to remove global tags\n\n elif self.config.hexists(\"guild:{}:tags\".format(ctx.message.guild.id), name):\n self.config.hdel(\"guild:{}:tags\".format(ctx.message.guild.id, name))\n\n await ctx.send(\"Tag {} removed.\".format(name))",
"def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)",
"def removeEmbedded(self, tag):\n self.embeddedTags = self.embeddedTags[:-1]",
"def cli(ctx):\n stopped = click.style(\"Stopped\", fg=\"red\")\n removed = click.style(\"Removed\", fg=\"blue\")\n for container in ctx.docker.get_containers():\n name = container.hostname\n node_name = ''.join([i for i in name if not i.isdigit()])\n image_name = container.dictionary['Config']['Image']\n if node_name in TO_KILL:\n container.stop(timeout=0)\n else:\n container.stop(timeout=5)\n # container.execute(\"poweroff\", \"root\", \"/\", False)\n # container.wait()\n ctx.log(\"Container %s --> %s\" % (name, stopped))\n container.remove(v=False, link=False, force=True)\n ctx.log(\"Container %s --> %s\" % (name, removed))\n ctx.state['containers'].remove(container.short_id)\n ctx.state.fast_dump()\n # remove untagged image\n if not image_name.startswith(ctx.prefix):\n ctx.docker.remove_image(image_name, force=True)\n ctx.docker.remove_network()",
"def tags_delete(tag_id):\n\n tags = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tags)\n db.session.commit()\n\n flash(f\"'{tags.name}' tag is deleted.\")\n\n return redirect(\"/tags\")",
"def completely_remove_tag(self, owner_userid, tag_name):\n\t\tself.log.debug(\"completely_remove_tag()\")\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\n\t\td = self.app.db.runOperation(\"SELECT zoto_remove_tag_from_all_user_images(%s, %s)\",\n\t\t\t\t (owner_userid, tag_name))\n\t\td.addCallback(lambda _: (0, \"success\"))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d",
"def quit(user, tag):\n def work():\n member = Member.get(user)\n member.remove_tag(tag)\n member.put()\n db.run_in_transaction(work)",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)",
"def remove_node_by_tagname(nodes: List, tagname: str):\n\n for remove in [node for node in nodes if node.tagname == tagname]:\n nodes.remove(remove)",
"def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)",
"def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)",
"def expungeTag(self, authenticationToken, guid):\r\n pass",
"def deleteTags(bufferNumber):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # define global variables\n global TAGS, TAGLINENUMBERS, BUFFERTICKS\n\n # try to delete the tags for the buffer {{{\n try:\n del TAGS[bufferNumber]\n del TAGLINENUMBERS[bufferNumber]\n del BUFFERTICKS[bufferNumber]\n except:\n pass\n # }}}\n # }}}",
"def delete_unused_tags( self, owner ):\n self.filter(\n owner = owner,\n documents__isnull = True).delete()",
"def test_networking_project_network_tag_delete(self):\n pass",
"def tags_remove(self, item_id, tags, **params):\n\n if isinstance(tags, basestring):\n tags = tags.split(',')\n\n self.queue('tags_remove', item_id=item_id, tags=tags, **params)",
"def remove(self, value):\n tags = self.__all_tags()\n if value in tags:\n tags.remove(value)\n self.__post_changes(tags)",
"def remove_tag(session, tagname, username='system_user'):\n session = validate_session(session)\n tag = tag_exists(session, tag_name=tagname)\n tagid= tag.id\n tag_stats = session.query(TagStats).\\\n filter(TagStats.tag_id == tag.id)\n if tag:\n try:\n tag_stats.delete()\n session.commit()\n session.delete(tag)\n session.commit()\n return(True, \"Tag %s was deleted\" % (tagname), tagid)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s does not exists\" % (tagname))",
"async def delete_concurrency_limit_by_tag(\n self,\n tag: str,\n ):\n try:\n await self._client.delete(\n f\"/concurrency_limits/tag/{tag}\",\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise",
"def multi_untag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t@stack\n\t\tdef delete_txn(txn, owner, tags, ids, tagger):\n\t\t\tfor tag in tags:\n\t\t\t\tid_list = []\n\t\t\t\tfor id in ids:\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_remove_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(delete_txn, owner_userid, tag_names, image_ids, tag_userid)",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def clean_tags(location, max_tags):\n git = '/usr/bin/git'\n ensure_dir(location)\n with utils.cd(location):\n cmd = [\n git,\n 'for-each-ref',\n '--sort=taggerdate',\n '--format=%(refname)',\n 'refs/tags'\n ]\n\n tags = subprocess.check_output(cmd).splitlines()\n old_tags = []\n while len(tags) > max_tags:\n tag = tags.pop(0)\n if tag.startswith('refs/tags/'):\n tag = tag[10:]\n\n # Don't delete tags that aren't ours\n if not tag.startswith(TAG_PREFIX):\n continue\n\n old_tags.append(tag)\n\n # if there aren't any old tags, bail early\n if len(old_tags) == 0:\n return\n\n cmd = [git, 'tag', '-d']\n cmd += old_tags\n subprocess.check_call(cmd)",
"def remove_tag(tag_id):\n tag = Tags.query.get(tag_id)\n db_session.delete(tag)\n db_session.commit()\n return 'Tag #%s (%s) has been deleted.' % (tag_id, tag.tag), 'success'",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def tearDown(self):\n Tag.objects.all().delete()\n super(TagTest, self).tearDown()",
"def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)",
"def replace_all_tags(tags):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"truncate mustard.tags\");\n\t\tpsycopg2.extras.execute_values(cur,\n\t\t\t\"insert into mustard.tags (id, english_name, english_desc) values %s\",\n\t\t\ttags)\n\t\tcur.execute(\"update mustard.status set tags_updated = now()\")",
"def ex_delete_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'DeleteTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def solr_delete(instances):\n __solr_prepare(instances)",
"def test_remove_defined_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\n \"type\": \"remove-tag\",\n \"defined_tags\": [\"cloud-custodian-test.mark-for-resize\"],\n },\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), None)",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def test_delete_instances(self, instances_count, create_instance):\n instance_name = generate_ids('instance').next()\n create_instance(instance_name, count=instances_count)",
"def _drop_tags(target, *regexps):\n\n for tagname in list(target.keys()):\n for _ in (x for x in regexps if re.search(x, tagname)):\n try:\n del target[tagname]\n logger.debug('Drop tag {0}[{1}]'.format(\n type(target).__name__, tagname))\n except KeyError:\n pass\n break",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def delete_tag(request):\n try:\n tags = request.POST.getlist('tag_id', 0)\n tag = Tag.objects.filter(pk__in=tags).delete()\n ActionLogger().log(request.user, \"deleted\", \"Knowledgebase Tag %s\" % tags)\n return format_ajax_response(True, \"Knoweldgebase tag deleted successfully.\")\n except Exception as ex:\n logger.error(\"Failed to delete_tag: %s\" % ex)\n return format_ajax_response(False, \"There was an error deleting the specified knowledgebase tag.\")",
"def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def delete_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass",
"def delete_tag(delete_timestamps):\n\n ctx = dash.callback_context\n triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)\n\n # When the button is initially added, it fires a callback.\n # We want to prevent this callback from making changes to the update signal.\n if triggered_value is None:\n raise PreventUpdate\n\n # Unfortunately, we have to convert the stringified dict back to a dict.\n # Dash doesn't provide us any other method to see which element triggered the callback.\n # This isn't very elegant, but I don't see any other way to proceed.\n id_dict = utils.string_to_dict(triggered_id)\n tag_idx = id_dict[\"index\"]\n state.delete_tag(tag_idx)\n\n return constants.OK_SIGNAL",
"async def remove(self, container, uids):"
] |
[
"0.71796733",
"0.68758255",
"0.6856147",
"0.6738873",
"0.6660143",
"0.6617361",
"0.6605107",
"0.6569438",
"0.65644944",
"0.6558519",
"0.6515231",
"0.65021783",
"0.6444791",
"0.63917637",
"0.63694924",
"0.63107765",
"0.6270814",
"0.6265261",
"0.625803",
"0.62343985",
"0.61936",
"0.61813426",
"0.6131549",
"0.6117737",
"0.6114514",
"0.61061996",
"0.60744405",
"0.60744405",
"0.6065185",
"0.60596365",
"0.6049634",
"0.6035556",
"0.6025484",
"0.6011416",
"0.6006487",
"0.5997965",
"0.5994748",
"0.5968711",
"0.5966337",
"0.5948067",
"0.5936516",
"0.59007347",
"0.59001243",
"0.589101",
"0.5886979",
"0.5880894",
"0.58647823",
"0.5856848",
"0.5853042",
"0.58432096",
"0.58362496",
"0.5803527",
"0.5802742",
"0.58023524",
"0.57949513",
"0.5769311",
"0.576411",
"0.5756913",
"0.5752936",
"0.5740305",
"0.5737808",
"0.5723935",
"0.5683401",
"0.56768125",
"0.5675899",
"0.5665922",
"0.56633765",
"0.5656861",
"0.5650562",
"0.564953",
"0.5646241",
"0.56456345",
"0.56230634",
"0.56228745",
"0.5614958",
"0.5614236",
"0.56130165",
"0.56053716",
"0.5604748",
"0.5597436",
"0.5595911",
"0.5591793",
"0.5582363",
"0.5581984",
"0.5563924",
"0.55610055",
"0.55601794",
"0.5556565",
"0.5553744",
"0.55485034",
"0.5542228",
"0.55394053",
"0.5528767",
"0.55280477",
"0.5527355",
"0.55267096",
"0.5520377",
"0.5519385",
"0.55111957",
"0.55106366",
"0.5509457"
] |
0.0
|
-1
|
> You can remove up to 20 tags at a time. If you remove a tag from all instances, the tag is automatically deleted.
|
async def untag_resources_with_options_async(
self,
request: dds_20151201_models.UntagResourcesRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.UntagResourcesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.all):
query['All'] = request.all
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.region_id):
query['RegionId'] = request.region_id
if not UtilClient.is_unset(request.resource_group_id):
query['ResourceGroupId'] = request.resource_group_id
if not UtilClient.is_unset(request.resource_id):
query['ResourceId'] = request.resource_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.resource_type):
query['ResourceType'] = request.resource_type
if not UtilClient.is_unset(request.tag_key):
query['TagKey'] = request.tag_key
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UntagResources',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.UntagResourcesResponse(),
await self.call_api_async(params, req, runtime)
)
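# ---------------------------------------------------------------------------
# Minimal async usage sketch (illustrative only, not part of the generated SDK).
# Same hypothetical placeholders as the synchronous example; the only change
# is awaiting the async variant of the call from within a coroutine.
# ---------------------------------------------------------------------------
async def untag_instance_example_async(client) -> None:
    request = dds_20151201_models.UntagResourcesRequest(
        region_id='cn-hangzhou',               # hypothetical region
        resource_type='INSTANCE',              # tags are bound to instances
        resource_id=['dds-bp1xxxxxxxxxxxxx'],  # hypothetical instance ID(s)
        tag_key=['env', 'owner'],              # up to 20 tag keys per call
    )
    runtime = util_models.RuntimeOptions()
    response = await client.untag_resources_with_options_async(request, runtime)
    print(response.body)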
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_tag(tag):\n tag.destroy()",
"def RemoveTags(obj):\n tags = obj.GetTags() # Get tags\n for t in tags: # Iterate through tags\n t.Remove() # Remove tag",
"async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))",
"def remove_tag(args):",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)",
"def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []",
"def cli(env, dry_run):\n\n tag_manager = TagManager(env.client)\n empty_tags = tag_manager.get_unattached_tags()\n\n for tag in empty_tags:\n if dry_run:\n click.secho(f\"(Dry Run) Removing {tag.get('name')}\", fg='yellow')\n else:\n result = tag_manager.delete_tag(tag.get('name'))\n color = 'green' if result else 'red'\n click.secho(f\"Removing {tag.get('name')}\", fg=color)",
"def delete_taggit_tags(apps, schema_editor):\n TaggitTag = apps.get_model('taggit', 'Tag')\n TaggitTag.objects.all().delete()",
"def delTags(self):\r\n for tag in self.tags:\r\n self.canvasCirkt.delete(tag)\r\n self.canvasCirkt.update()",
"def __delete__(self, instance):\r\n self._set_instance_tag_cache(instance, '')",
"def clean(self):\n tags = self.get_tags()\n for tag in tags:\n image_name = self.build_image_name(tag)\n try:\n self.client.images.remove(image_name, force=True)\n except Exception as ex:\n print('Cannot remove {}: {}'.format(tag, str(ex)))",
"def destroyContainer(tag): #@NoSelf",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_taggit_taggeditems(apps, schema_editor):\n TaggitTaggedItem = apps.get_model('taggit', 'TaggedItem')\n TaggitTaggedItem.objects.all().delete()",
"def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})",
"def delete_tags(configurationIds=None, tags=None):\n pass",
"def delete_all(self):\n for tag in self._segments['APP1'].get_tag_list():\n try:\n self.__delattr__(tag)\n except AttributeError:\n warnings.warn(\"could not delete tag \" + tag, RuntimeWarning)",
"def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def delete_tags(self, entry, tags, tag_template_name):\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for persisted_tag in persisted_tags:\n logging.info('Processing Tag from Template: %s ...',\n persisted_tag.template)\n tag_to_delete = None\n\n if tag_template_name in persisted_tag.template:\n tag_to_delete = persisted_tag\n for tag in tags:\n if tag.template == persisted_tag.template and \\\n tag.column == persisted_tag.column:\n tag_to_delete = None\n break\n\n if tag_to_delete:\n self.delete_tag(tag_to_delete)\n logging.info('Tag deleted: %s', tag_to_delete.name)\n else:\n logging.info('Tag is up-to-date: %s', persisted_tag.name)",
"async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())",
"def remove_tags(self, tags):\n for tag in tags:\n self.remove_tag(tag)\n\n return self",
"def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def tags_remove(self, item, tags):\n self._createTagAction(item, \"tags_remove\", tags)",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def remove_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags - set(tags)\n return cp",
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def remove_many_descriptors(self, uuids):",
"def remove_tag(self, tag):\n for task in self._tasks:\n task.remove_tag(tag)\n\n return self",
"def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))",
"def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]",
"async def delete_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"delete from tags where \n map_id in (select map_id from maps where map_path=?) and \n tag_name=? \"\"\"\n select(conn, insert_sql, (map_name, tag))\n await channel.send(f\"Removed tags `{' '.join(tags)}` from map {map_name}\")",
"def remove_tags_from_resource(ResourceId=None, TagKeys=None):\n pass",
"def delete_bucket_tagging(Bucket=None):\n pass",
"def destroy_machines_by_tag(self, tag_name):\n for droplet in self.manager.get_all_droplets(tag_name=tag_name):\n eprint(\"Destroying %s\" % droplet.name)\n droplet.destroy()",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def tags():",
"def remove():",
"def remove(self):",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def test_remove_tag_from_derived_metric(self):\n pass",
"def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])",
"def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))",
"def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def stop_untagged_instances(untagged_instance_ids, temporary_user):\n try:\n logger.info('Stopping the untagged instances : ')\n logger.info(untagged_instance_ids)\n temporary_user.stop_instances(InstanceIds=untagged_instance_ids)\n except Exception as error:\n logger.info('The instances failed to stop with the following error : {}'.format(error))",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)",
"def remove_tag(self, index):\n\n model_index = self.GetItemData(index)\n self.DeleteItem(model_index)\n del self._clientData[model_index]",
"def remove_tag(self, tag):\n cp = self.copy()\n cp.tags.remove(tag)\n return cp",
"def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)",
"def untagAll(self, authenticationToken, guid):\r\n pass",
"def tags_clear(self, item, tags):\n self._createTagAction(item, \"tags_clear\", tags)",
"def delete_asg_tags(asg_name, key):\n logger.info('Deleting tag from asg key: {}...'.format(key))\n if not app_config['DRY_RUN']:\n response = client.delete_tags(\n Tags=[\n {\n 'Key': key,\n 'ResourceId': asg_name,\n 'ResourceType': 'auto-scaling-group'\n },\n ]\n )\n if response['ResponseMetadata']['HTTPStatusCode'] != requests.codes.ok:\n logger.info('AWS asg tag modification operation did not succeed. Exiting.')\n raise Exception('AWS asg tag modification operation did not succeed. Exiting.')\n else:\n logger.info('Skipping asg tag modification due to dry run flag set')\n response = {'message': 'dry run only'}\n return response",
"def remove_tags(self, tags):\n for task in self._tasks:\n task.remove_tags(tags)\n\n return self",
"def remove():\n pass",
"async def remove(self, ctx, name: str):\n if self.config.hexists(\"config:tags:global\", name):\n if not checks.sudo_check(ctx.message):\n await ctx.send(\"Only {} can remove global tags.\".format(self.bot.owner))\n else:\n self.config.hdel(\"config:tags:global\", name)\n\n elif self.config.hexists(\"chan:{}:tags\".format(ctx.message.channel.id), name):\n self.config.hdel(\"chan:{}:tags\".format(ctx.message.channel.id), name)\n # Don't allow ability to remove global tags\n\n elif self.config.hexists(\"guild:{}:tags\".format(ctx.message.guild.id), name):\n self.config.hdel(\"guild:{}:tags\".format(ctx.message.guild.id, name))\n\n await ctx.send(\"Tag {} removed.\".format(name))",
"def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)",
"def removeEmbedded(self, tag):\n self.embeddedTags = self.embeddedTags[:-1]",
"def cli(ctx):\n stopped = click.style(\"Stopped\", fg=\"red\")\n removed = click.style(\"Removed\", fg=\"blue\")\n for container in ctx.docker.get_containers():\n name = container.hostname\n node_name = ''.join([i for i in name if not i.isdigit()])\n image_name = container.dictionary['Config']['Image']\n if node_name in TO_KILL:\n container.stop(timeout=0)\n else:\n container.stop(timeout=5)\n # container.execute(\"poweroff\", \"root\", \"/\", False)\n # container.wait()\n ctx.log(\"Container %s --> %s\" % (name, stopped))\n container.remove(v=False, link=False, force=True)\n ctx.log(\"Container %s --> %s\" % (name, removed))\n ctx.state['containers'].remove(container.short_id)\n ctx.state.fast_dump()\n # remove untagged image\n if not image_name.startswith(ctx.prefix):\n ctx.docker.remove_image(image_name, force=True)\n ctx.docker.remove_network()",
"def tags_delete(tag_id):\n\n tags = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tags)\n db.session.commit()\n\n flash(f\"'{tags.name}' tag is deleted.\")\n\n return redirect(\"/tags\")",
"def completely_remove_tag(self, owner_userid, tag_name):\n\t\tself.log.debug(\"completely_remove_tag()\")\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\n\t\td = self.app.db.runOperation(\"SELECT zoto_remove_tag_from_all_user_images(%s, %s)\",\n\t\t\t\t (owner_userid, tag_name))\n\t\td.addCallback(lambda _: (0, \"success\"))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d",
"def quit(user, tag):\n def work():\n member = Member.get(user)\n member.remove_tag(tag)\n member.put()\n db.run_in_transaction(work)",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)",
"def remove_node_by_tagname(nodes: List, tagname: str):\n\n for remove in [node for node in nodes if node.tagname == tagname]:\n nodes.remove(remove)",
"def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)",
"def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)",
"def expungeTag(self, authenticationToken, guid):\r\n pass",
"def deleteTags(bufferNumber):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # define global variables\n global TAGS, TAGLINENUMBERS, BUFFERTICKS\n\n # try to delete the tags for the buffer {{{\n try:\n del TAGS[bufferNumber]\n del TAGLINENUMBERS[bufferNumber]\n del BUFFERTICKS[bufferNumber]\n except:\n pass\n # }}}\n # }}}",
"def delete_unused_tags( self, owner ):\n self.filter(\n owner = owner,\n documents__isnull = True).delete()",
"def tags_remove(self, item_id, tags, **params):\n\n if isinstance(tags, basestring):\n tags = tags.split(',')\n\n self.queue('tags_remove', item_id=item_id, tags=tags, **params)",
"def test_networking_project_network_tag_delete(self):\n pass",
"def remove(self, value):\n tags = self.__all_tags()\n if value in tags:\n tags.remove(value)\n self.__post_changes(tags)",
"def remove_tag(session, tagname, username='system_user'):\n session = validate_session(session)\n tag = tag_exists(session, tag_name=tagname)\n tagid= tag.id\n tag_stats = session.query(TagStats).\\\n filter(TagStats.tag_id == tag.id)\n if tag:\n try:\n tag_stats.delete()\n session.commit()\n session.delete(tag)\n session.commit()\n return(True, \"Tag %s was deleted\" % (tagname), tagid)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s does not exists\" % (tagname))",
"async def delete_concurrency_limit_by_tag(\n self,\n tag: str,\n ):\n try:\n await self._client.delete(\n f\"/concurrency_limits/tag/{tag}\",\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise",
"def multi_untag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t@stack\n\t\tdef delete_txn(txn, owner, tags, ids, tagger):\n\t\t\tfor tag in tags:\n\t\t\t\tid_list = []\n\t\t\t\tfor id in ids:\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_remove_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(delete_txn, owner_userid, tag_names, image_ids, tag_userid)",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def clean_tags(location, max_tags):\n git = '/usr/bin/git'\n ensure_dir(location)\n with utils.cd(location):\n cmd = [\n git,\n 'for-each-ref',\n '--sort=taggerdate',\n '--format=%(refname)',\n 'refs/tags'\n ]\n\n tags = subprocess.check_output(cmd).splitlines()\n old_tags = []\n while len(tags) > max_tags:\n tag = tags.pop(0)\n if tag.startswith('refs/tags/'):\n tag = tag[10:]\n\n # Don't delete tags that aren't ours\n if not tag.startswith(TAG_PREFIX):\n continue\n\n old_tags.append(tag)\n\n # if there aren't any old tags, bail early\n if len(old_tags) == 0:\n return\n\n cmd = [git, 'tag', '-d']\n cmd += old_tags\n subprocess.check_call(cmd)",
"def remove_tag(tag_id):\n tag = Tags.query.get(tag_id)\n db_session.delete(tag)\n db_session.commit()\n return 'Tag #%s (%s) has been deleted.' % (tag_id, tag.tag), 'success'",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def tearDown(self):\n Tag.objects.all().delete()\n super(TagTest, self).tearDown()",
"def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)",
"def replace_all_tags(tags):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"truncate mustard.tags\");\n\t\tpsycopg2.extras.execute_values(cur,\n\t\t\t\"insert into mustard.tags (id, english_name, english_desc) values %s\",\n\t\t\ttags)\n\t\tcur.execute(\"update mustard.status set tags_updated = now()\")",
"def ex_delete_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'DeleteTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def solr_delete(instances):\n __solr_prepare(instances)",
"def test_remove_defined_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\n \"type\": \"remove-tag\",\n \"defined_tags\": [\"cloud-custodian-test.mark-for-resize\"],\n },\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), None)",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def test_delete_instances(self, instances_count, create_instance):\n instance_name = generate_ids('instance').next()\n create_instance(instance_name, count=instances_count)",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def _drop_tags(target, *regexps):\n\n for tagname in list(target.keys()):\n for _ in (x for x in regexps if re.search(x, tagname)):\n try:\n del target[tagname]\n logger.debug('Drop tag {0}[{1}]'.format(\n type(target).__name__, tagname))\n except KeyError:\n pass\n break",
"def delete_tag(request):\n try:\n tags = request.POST.getlist('tag_id', 0)\n tag = Tag.objects.filter(pk__in=tags).delete()\n ActionLogger().log(request.user, \"deleted\", \"Knowledgebase Tag %s\" % tags)\n return format_ajax_response(True, \"Knoweldgebase tag deleted successfully.\")\n except Exception as ex:\n logger.error(\"Failed to delete_tag: %s\" % ex)\n return format_ajax_response(False, \"There was an error deleting the specified knowledgebase tag.\")",
"def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def delete_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass",
"def delete_tag(delete_timestamps):\n\n ctx = dash.callback_context\n triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)\n\n # When the button is initially added, it fires a callback.\n # We want to prevent this callback from making changes to the update signal.\n if triggered_value is None:\n raise PreventUpdate\n\n # Unfortunately, we have to convert the stringified dict back to a dict.\n # Dash doesn't provide us any other method to see which element triggered the callback.\n # This isn't very elegant, but I don't see any other way to proceed.\n id_dict = utils.string_to_dict(triggered_id)\n tag_idx = id_dict[\"index\"]\n state.delete_tag(tag_idx)\n\n return constants.OK_SIGNAL",
"async def remove(self, container, uids):"
] |
[
"0.71809316",
"0.68764615",
"0.6857069",
"0.6740014",
"0.66612315",
"0.66182756",
"0.66062665",
"0.65692097",
"0.6564891",
"0.65599036",
"0.65165895",
"0.65031886",
"0.6446048",
"0.6392404",
"0.63693607",
"0.6309676",
"0.62702256",
"0.6265954",
"0.62573457",
"0.62348354",
"0.6194601",
"0.6182905",
"0.6132926",
"0.6118087",
"0.61154354",
"0.61067057",
"0.607406",
"0.607406",
"0.6066094",
"0.6060391",
"0.6050302",
"0.60368496",
"0.60267806",
"0.6012212",
"0.6007227",
"0.5997727",
"0.59953666",
"0.5969917",
"0.5967137",
"0.5947783",
"0.59379095",
"0.5902614",
"0.59009075",
"0.58910894",
"0.5887626",
"0.588097",
"0.58659697",
"0.58574295",
"0.58542466",
"0.584342",
"0.58367133",
"0.58044803",
"0.58043194",
"0.58037454",
"0.57950705",
"0.5769392",
"0.57639754",
"0.5757466",
"0.57539165",
"0.5740656",
"0.5739118",
"0.57251096",
"0.5683777",
"0.5677705",
"0.5676546",
"0.56663984",
"0.56636155",
"0.56589735",
"0.56517327",
"0.5650029",
"0.5647069",
"0.56457776",
"0.56236684",
"0.5623639",
"0.561497",
"0.5614315",
"0.5613853",
"0.56070244",
"0.56058174",
"0.5597476",
"0.5596785",
"0.5592693",
"0.55835825",
"0.55822295",
"0.5565156",
"0.55622894",
"0.5561225",
"0.55581003",
"0.55540776",
"0.5548307",
"0.5543541",
"0.55390143",
"0.5530219",
"0.5528543",
"0.55277777",
"0.5527769",
"0.55208886",
"0.55204093",
"0.55118406",
"0.55112356",
"0.55105263"
] |
0.0
|
-1
|
> You can remove up to 20 tags at a time. If you remove a tag from all instances, the tag is automatically deleted.
|
def untag_resources(
self,
request: dds_20151201_models.UntagResourcesRequest,
) -> dds_20151201_models.UntagResourcesResponse:
runtime = util_models.RuntimeOptions()
return self.untag_resources_with_options(request, runtime)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_tag(tag):\n tag.destroy()",
"def RemoveTags(obj):\n tags = obj.GetTags() # Get tags\n for t in tags: # Iterate through tags\n t.Remove() # Remove tag",
"async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))",
"def remove_tag(args):",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)",
"def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []",
"def cli(env, dry_run):\n\n tag_manager = TagManager(env.client)\n empty_tags = tag_manager.get_unattached_tags()\n\n for tag in empty_tags:\n if dry_run:\n click.secho(f\"(Dry Run) Removing {tag.get('name')}\", fg='yellow')\n else:\n result = tag_manager.delete_tag(tag.get('name'))\n color = 'green' if result else 'red'\n click.secho(f\"Removing {tag.get('name')}\", fg=color)",
"def delete_taggit_tags(apps, schema_editor):\n TaggitTag = apps.get_model('taggit', 'Tag')\n TaggitTag.objects.all().delete()",
"def delTags(self):\r\n for tag in self.tags:\r\n self.canvasCirkt.delete(tag)\r\n self.canvasCirkt.update()",
"def __delete__(self, instance):\r\n self._set_instance_tag_cache(instance, '')",
"def clean(self):\n tags = self.get_tags()\n for tag in tags:\n image_name = self.build_image_name(tag)\n try:\n self.client.images.remove(image_name, force=True)\n except Exception as ex:\n print('Cannot remove {}: {}'.format(tag, str(ex)))",
"def destroyContainer(tag): #@NoSelf",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_taggit_taggeditems(apps, schema_editor):\n TaggitTaggedItem = apps.get_model('taggit', 'TaggedItem')\n TaggitTaggedItem.objects.all().delete()",
"def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})",
"def delete_tags(configurationIds=None, tags=None):\n pass",
"def delete_all(self):\n for tag in self._segments['APP1'].get_tag_list():\n try:\n self.__delattr__(tag)\n except AttributeError:\n warnings.warn(\"could not delete tag \" + tag, RuntimeWarning)",
"def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def delete_tags(self, entry, tags, tag_template_name):\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for persisted_tag in persisted_tags:\n logging.info('Processing Tag from Template: %s ...',\n persisted_tag.template)\n tag_to_delete = None\n\n if tag_template_name in persisted_tag.template:\n tag_to_delete = persisted_tag\n for tag in tags:\n if tag.template == persisted_tag.template and \\\n tag.column == persisted_tag.column:\n tag_to_delete = None\n break\n\n if tag_to_delete:\n self.delete_tag(tag_to_delete)\n logging.info('Tag deleted: %s', tag_to_delete.name)\n else:\n logging.info('Tag is up-to-date: %s', persisted_tag.name)",
"async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())",
"def remove_tags(self, tags):\n for tag in tags:\n self.remove_tag(tag)\n\n return self",
"def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def tags_remove(self, item, tags):\n self._createTagAction(item, \"tags_remove\", tags)",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def remove_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags - set(tags)\n return cp",
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def remove_many_descriptors(self, uuids):",
"def remove_tag(self, tag):\n for task in self._tasks:\n task.remove_tag(tag)\n\n return self",
"def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))",
"def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]",
"async def delete_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"delete from tags where \n map_id in (select map_id from maps where map_path=?) and \n tag_name=? \"\"\"\n select(conn, insert_sql, (map_name, tag))\n await channel.send(f\"Removed tags `{' '.join(tags)}` from map {map_name}\")",
"def remove_tags_from_resource(ResourceId=None, TagKeys=None):\n pass",
"def delete_bucket_tagging(Bucket=None):\n pass",
"def destroy_machines_by_tag(self, tag_name):\n for droplet in self.manager.get_all_droplets(tag_name=tag_name):\n eprint(\"Destroying %s\" % droplet.name)\n droplet.destroy()",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def tags():",
"def remove():",
"def remove(self):",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def test_remove_tag_from_derived_metric(self):\n pass",
"def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])",
"def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))",
"def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def stop_untagged_instances(untagged_instance_ids, temporary_user):\n try:\n logger.info('Stopping the untagged instances : ')\n logger.info(untagged_instance_ids)\n temporary_user.stop_instances(InstanceIds=untagged_instance_ids)\n except Exception as error:\n logger.info('The instances failed to stop with the following error : {}'.format(error))",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)",
"def remove_tag(self, index):\n\n model_index = self.GetItemData(index)\n self.DeleteItem(model_index)\n del self._clientData[model_index]",
"def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)",
"def remove_tag(self, tag):\n cp = self.copy()\n cp.tags.remove(tag)\n return cp",
"def untagAll(self, authenticationToken, guid):\r\n pass",
"def tags_clear(self, item, tags):\n self._createTagAction(item, \"tags_clear\", tags)",
"def delete_asg_tags(asg_name, key):\n logger.info('Deleting tag from asg key: {}...'.format(key))\n if not app_config['DRY_RUN']:\n response = client.delete_tags(\n Tags=[\n {\n 'Key': key,\n 'ResourceId': asg_name,\n 'ResourceType': 'auto-scaling-group'\n },\n ]\n )\n if response['ResponseMetadata']['HTTPStatusCode'] != requests.codes.ok:\n logger.info('AWS asg tag modification operation did not succeed. Exiting.')\n raise Exception('AWS asg tag modification operation did not succeed. Exiting.')\n else:\n logger.info('Skipping asg tag modification due to dry run flag set')\n response = {'message': 'dry run only'}\n return response",
"def remove_tags(self, tags):\n for task in self._tasks:\n task.remove_tags(tags)\n\n return self",
"def remove():\n pass",
"async def remove(self, ctx, name: str):\n if self.config.hexists(\"config:tags:global\", name):\n if not checks.sudo_check(ctx.message):\n await ctx.send(\"Only {} can remove global tags.\".format(self.bot.owner))\n else:\n self.config.hdel(\"config:tags:global\", name)\n\n elif self.config.hexists(\"chan:{}:tags\".format(ctx.message.channel.id), name):\n self.config.hdel(\"chan:{}:tags\".format(ctx.message.channel.id), name)\n # Don't allow ability to remove global tags\n\n elif self.config.hexists(\"guild:{}:tags\".format(ctx.message.guild.id), name):\n self.config.hdel(\"guild:{}:tags\".format(ctx.message.guild.id, name))\n\n await ctx.send(\"Tag {} removed.\".format(name))",
"def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)",
"def removeEmbedded(self, tag):\n self.embeddedTags = self.embeddedTags[:-1]",
"def cli(ctx):\n stopped = click.style(\"Stopped\", fg=\"red\")\n removed = click.style(\"Removed\", fg=\"blue\")\n for container in ctx.docker.get_containers():\n name = container.hostname\n node_name = ''.join([i for i in name if not i.isdigit()])\n image_name = container.dictionary['Config']['Image']\n if node_name in TO_KILL:\n container.stop(timeout=0)\n else:\n container.stop(timeout=5)\n # container.execute(\"poweroff\", \"root\", \"/\", False)\n # container.wait()\n ctx.log(\"Container %s --> %s\" % (name, stopped))\n container.remove(v=False, link=False, force=True)\n ctx.log(\"Container %s --> %s\" % (name, removed))\n ctx.state['containers'].remove(container.short_id)\n ctx.state.fast_dump()\n # remove untagged image\n if not image_name.startswith(ctx.prefix):\n ctx.docker.remove_image(image_name, force=True)\n ctx.docker.remove_network()",
"def tags_delete(tag_id):\n\n tags = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tags)\n db.session.commit()\n\n flash(f\"'{tags.name}' tag is deleted.\")\n\n return redirect(\"/tags\")",
"def completely_remove_tag(self, owner_userid, tag_name):\n\t\tself.log.debug(\"completely_remove_tag()\")\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\n\t\td = self.app.db.runOperation(\"SELECT zoto_remove_tag_from_all_user_images(%s, %s)\",\n\t\t\t\t (owner_userid, tag_name))\n\t\td.addCallback(lambda _: (0, \"success\"))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d",
"def quit(user, tag):\n def work():\n member = Member.get(user)\n member.remove_tag(tag)\n member.put()\n db.run_in_transaction(work)",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)",
"def remove_node_by_tagname(nodes: List, tagname: str):\n\n for remove in [node for node in nodes if node.tagname == tagname]:\n nodes.remove(remove)",
"def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)",
"def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)",
"def expungeTag(self, authenticationToken, guid):\r\n pass",
"def deleteTags(bufferNumber):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # define global variables\n global TAGS, TAGLINENUMBERS, BUFFERTICKS\n\n # try to delete the tags for the buffer {{{\n try:\n del TAGS[bufferNumber]\n del TAGLINENUMBERS[bufferNumber]\n del BUFFERTICKS[bufferNumber]\n except:\n pass\n # }}}\n # }}}",
"def delete_unused_tags( self, owner ):\n self.filter(\n owner = owner,\n documents__isnull = True).delete()",
"def test_networking_project_network_tag_delete(self):\n pass",
"def tags_remove(self, item_id, tags, **params):\n\n if isinstance(tags, basestring):\n tags = tags.split(',')\n\n self.queue('tags_remove', item_id=item_id, tags=tags, **params)",
"def remove(self, value):\n tags = self.__all_tags()\n if value in tags:\n tags.remove(value)\n self.__post_changes(tags)",
"def remove_tag(session, tagname, username='system_user'):\n session = validate_session(session)\n tag = tag_exists(session, tag_name=tagname)\n tagid= tag.id\n tag_stats = session.query(TagStats).\\\n filter(TagStats.tag_id == tag.id)\n if tag:\n try:\n tag_stats.delete()\n session.commit()\n session.delete(tag)\n session.commit()\n return(True, \"Tag %s was deleted\" % (tagname), tagid)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s does not exists\" % (tagname))",
"async def delete_concurrency_limit_by_tag(\n self,\n tag: str,\n ):\n try:\n await self._client.delete(\n f\"/concurrency_limits/tag/{tag}\",\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise",
"def multi_untag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t@stack\n\t\tdef delete_txn(txn, owner, tags, ids, tagger):\n\t\t\tfor tag in tags:\n\t\t\t\tid_list = []\n\t\t\t\tfor id in ids:\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_remove_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(delete_txn, owner_userid, tag_names, image_ids, tag_userid)",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def clean_tags(location, max_tags):\n git = '/usr/bin/git'\n ensure_dir(location)\n with utils.cd(location):\n cmd = [\n git,\n 'for-each-ref',\n '--sort=taggerdate',\n '--format=%(refname)',\n 'refs/tags'\n ]\n\n tags = subprocess.check_output(cmd).splitlines()\n old_tags = []\n while len(tags) > max_tags:\n tag = tags.pop(0)\n if tag.startswith('refs/tags/'):\n tag = tag[10:]\n\n # Don't delete tags that aren't ours\n if not tag.startswith(TAG_PREFIX):\n continue\n\n old_tags.append(tag)\n\n # if there aren't any old tags, bail early\n if len(old_tags) == 0:\n return\n\n cmd = [git, 'tag', '-d']\n cmd += old_tags\n subprocess.check_call(cmd)",
"def remove_tag(tag_id):\n tag = Tags.query.get(tag_id)\n db_session.delete(tag)\n db_session.commit()\n return 'Tag #%s (%s) has been deleted.' % (tag_id, tag.tag), 'success'",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def tearDown(self):\n Tag.objects.all().delete()\n super(TagTest, self).tearDown()",
"def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)",
"def replace_all_tags(tags):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"truncate mustard.tags\");\n\t\tpsycopg2.extras.execute_values(cur,\n\t\t\t\"insert into mustard.tags (id, english_name, english_desc) values %s\",\n\t\t\ttags)\n\t\tcur.execute(\"update mustard.status set tags_updated = now()\")",
"def ex_delete_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'DeleteTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def solr_delete(instances):\n __solr_prepare(instances)",
"def test_remove_defined_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\n \"type\": \"remove-tag\",\n \"defined_tags\": [\"cloud-custodian-test.mark-for-resize\"],\n },\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), None)",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def test_delete_instances(self, instances_count, create_instance):\n instance_name = generate_ids('instance').next()\n create_instance(instance_name, count=instances_count)",
"def _drop_tags(target, *regexps):\n\n for tagname in list(target.keys()):\n for _ in (x for x in regexps if re.search(x, tagname)):\n try:\n del target[tagname]\n logger.debug('Drop tag {0}[{1}]'.format(\n type(target).__name__, tagname))\n except KeyError:\n pass\n break",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def delete_tag(request):\n try:\n tags = request.POST.getlist('tag_id', 0)\n tag = Tag.objects.filter(pk__in=tags).delete()\n ActionLogger().log(request.user, \"deleted\", \"Knowledgebase Tag %s\" % tags)\n return format_ajax_response(True, \"Knoweldgebase tag deleted successfully.\")\n except Exception as ex:\n logger.error(\"Failed to delete_tag: %s\" % ex)\n return format_ajax_response(False, \"There was an error deleting the specified knowledgebase tag.\")",
"def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def delete_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass",
"def delete_tag(delete_timestamps):\n\n ctx = dash.callback_context\n triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)\n\n # When the button is initially added, it fires a callback.\n # We want to prevent this callback from making changes to the update signal.\n if triggered_value is None:\n raise PreventUpdate\n\n # Unfortunately, we have to convert the stringified dict back to a dict.\n # Dash doesn't provide us any other method to see which element triggered the callback.\n # This isn't very elegant, but I don't see any other way to proceed.\n id_dict = utils.string_to_dict(triggered_id)\n tag_idx = id_dict[\"index\"]\n state.delete_tag(tag_idx)\n\n return constants.OK_SIGNAL",
"async def remove(self, container, uids):"
] |
[
"0.71796733",
"0.68758255",
"0.6856147",
"0.6738873",
"0.6660143",
"0.6617361",
"0.6605107",
"0.6569438",
"0.65644944",
"0.6558519",
"0.6515231",
"0.65021783",
"0.6444791",
"0.63917637",
"0.63694924",
"0.63107765",
"0.6270814",
"0.6265261",
"0.625803",
"0.62343985",
"0.61936",
"0.61813426",
"0.6131549",
"0.6117737",
"0.6114514",
"0.61061996",
"0.60744405",
"0.60744405",
"0.6065185",
"0.60596365",
"0.6049634",
"0.6035556",
"0.6025484",
"0.6011416",
"0.6006487",
"0.5997965",
"0.5994748",
"0.5968711",
"0.5966337",
"0.5948067",
"0.5936516",
"0.59007347",
"0.59001243",
"0.589101",
"0.5886979",
"0.5880894",
"0.58647823",
"0.5856848",
"0.5853042",
"0.58432096",
"0.58362496",
"0.5803527",
"0.5802742",
"0.58023524",
"0.57949513",
"0.5769311",
"0.576411",
"0.5756913",
"0.5752936",
"0.5740305",
"0.5737808",
"0.5723935",
"0.5683401",
"0.56768125",
"0.5675899",
"0.5665922",
"0.56633765",
"0.5656861",
"0.5650562",
"0.564953",
"0.5646241",
"0.56456345",
"0.56230634",
"0.56228745",
"0.5614958",
"0.5614236",
"0.56130165",
"0.56053716",
"0.5604748",
"0.5597436",
"0.5595911",
"0.5591793",
"0.5582363",
"0.5581984",
"0.5563924",
"0.55610055",
"0.55601794",
"0.5556565",
"0.5553744",
"0.55485034",
"0.5542228",
"0.55394053",
"0.5528767",
"0.55280477",
"0.5527355",
"0.55267096",
"0.5520377",
"0.5519385",
"0.55111957",
"0.55106366",
"0.5509457"
] |
0.0
|
-1
|
> You can remove up to 20 tags at a time. If you remove a tag from all instances, the tag is automatically deleted.
|
async def untag_resources_async(
self,
request: dds_20151201_models.UntagResourcesRequest,
) -> dds_20151201_models.UntagResourcesResponse:
runtime = util_models.RuntimeOptions()
return await self.untag_resources_with_options_async(request, runtime)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delete_tag(tag):\n tag.destroy()",
"def RemoveTags(obj):\n tags = obj.GetTags() # Get tags\n for t in tags: # Iterate through tags\n t.Remove() # Remove tag",
"async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))",
"def remove_tag(args):",
"def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]",
"def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)",
"def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []",
"def cli(env, dry_run):\n\n tag_manager = TagManager(env.client)\n empty_tags = tag_manager.get_unattached_tags()\n\n for tag in empty_tags:\n if dry_run:\n click.secho(f\"(Dry Run) Removing {tag.get('name')}\", fg='yellow')\n else:\n result = tag_manager.delete_tag(tag.get('name'))\n color = 'green' if result else 'red'\n click.secho(f\"Removing {tag.get('name')}\", fg=color)",
"def delete_taggit_tags(apps, schema_editor):\n TaggitTag = apps.get_model('taggit', 'Tag')\n TaggitTag.objects.all().delete()",
"def delTags(self):\r\n for tag in self.tags:\r\n self.canvasCirkt.delete(tag)\r\n self.canvasCirkt.update()",
"def __delete__(self, instance):\r\n self._set_instance_tag_cache(instance, '')",
"def clean(self):\n tags = self.get_tags()\n for tag in tags:\n image_name = self.build_image_name(tag)\n try:\n self.client.images.remove(image_name, force=True)\n except Exception as ex:\n print('Cannot remove {}: {}'.format(tag, str(ex)))",
"def destroyContainer(tag): #@NoSelf",
"def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)",
"def delete_taggit_taggeditems(apps, schema_editor):\n TaggitTaggedItem = apps.get_model('taggit', 'TaggedItem')\n TaggitTaggedItem.objects.all().delete()",
"def test_remove_tags(self):\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tags': {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'}},\n ],\n })\n p.run()\n\n # verify initial tag set\n s = Session()\n client = s.client('azure.mgmt.resource.ResourceManagementClient')\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep',\n 'added-1': 'to-delete', 'added-2': 'to-delete'})\n\n p = self.load_policy({\n 'name': 'test-azure-remove-tag',\n 'resource': 'azure.resourcegroup',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'test_vm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['added-1', 'added-2']}\n ],\n })\n p.run()\n\n # verify tags removed and pre-existing tags not removed\n rg = [rg for rg in client.resource_groups.list() if rg.name == 'test_vm'][0]\n self.assertEqual(rg.tags,\n {'pre-existing-1': 'to-keep', 'pre-existing-2': 'to-keep'})",
"def delete_tags(configurationIds=None, tags=None):\n pass",
"def delete_all(self):\n for tag in self._segments['APP1'].get_tag_list():\n try:\n self.__delattr__(tag)\n except AttributeError:\n warnings.warn(\"could not delete tag \" + tag, RuntimeWarning)",
"def test_remove_single_tag(self):\n p = self.load_policy({\n 'name': 'test-azure-remove-single-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'tag',\n 'tag': 'tag1',\n 'value': 'to-delete'}\n ],\n })\n p.run()\n\n # verify the initial tag set\n s = Session()\n client = s.client('azure.mgmt.compute.ComputeManagementClient')\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'tag1': 'to-delete', 'testtag': 'testvalue'})\n\n p = self.load_policy({\n 'name': 'test-azure-tag',\n 'resource': 'azure.vm',\n 'filters': [\n {'type': 'value',\n 'key': 'name',\n 'op': 'eq',\n 'value_type': 'normalize',\n 'value': 'cctestvm'}\n ],\n 'actions': [\n {'type': 'untag',\n 'tags': ['tag1']}\n ],\n })\n p.run()\n\n # verify that the a tag is deleted without modifying existing tags\n vm = client.virtual_machines.get('test_vm', 'cctestvm')\n self.assertEqual(vm.tags, {'testtag': 'testvalue'})",
"def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200",
"def delete_tags(self, entry, tags, tag_template_name):\n persisted_tags = self.list_tags(entry.name)\n\n # Fetch GRPCIterator.\n persisted_tags = [tag for tag in persisted_tags]\n\n for persisted_tag in persisted_tags:\n logging.info('Processing Tag from Template: %s ...',\n persisted_tag.template)\n tag_to_delete = None\n\n if tag_template_name in persisted_tag.template:\n tag_to_delete = persisted_tag\n for tag in tags:\n if tag.template == persisted_tag.template and \\\n tag.column == persisted_tag.column:\n tag_to_delete = None\n break\n\n if tag_to_delete:\n self.delete_tag(tag_to_delete)\n logging.info('Tag deleted: %s', tag_to_delete.name)\n else:\n logging.info('Tag is up-to-date: %s', persisted_tag.name)",
"async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())",
"def remove_tags(self, tags):\n for tag in tags:\n self.remove_tag(tag)\n\n return self",
"def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])",
"def delete(self):\n request = self.tags_service.delete(path=self._path)\n request.execute()",
"def tags_remove(self, item, tags):\n self._createTagAction(item, \"tags_remove\", tags)",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def delete_tags(ResourceArn=None, TagKeys=None):\n pass",
"def remove_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags - set(tags)\n return cp",
"def tag_instance(self, tags):\n self._request({\"instance-tags\": dict(tags)})",
"def remove_many_descriptors(self, uuids):",
"def remove_tag(self, tag):\n for task in self._tasks:\n task.remove_tag(tag)\n\n return self",
"def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))",
"def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]",
"async def delete_tags(tags: List[str], map_name: str, conn: Connection, channel: TextChannel) -> None:\n for tag in tags:\n insert_sql = \"\"\"delete from tags where \n map_id in (select map_id from maps where map_path=?) and \n tag_name=? \"\"\"\n select(conn, insert_sql, (map_name, tag))\n await channel.send(f\"Removed tags `{' '.join(tags)}` from map {map_name}\")",
"def remove_tags_from_resource(ResourceId=None, TagKeys=None):\n pass",
"def delete_bucket_tagging(Bucket=None):\n pass",
"def destroy_machines_by_tag(self, tag_name):\n for droplet in self.manager.get_all_droplets(tag_name=tag_name):\n eprint(\"Destroying %s\" % droplet.name)\n droplet.destroy()",
"def delete(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.delete(path, params, **options)",
"def tags():",
"def remove():",
"def remove(self):",
"def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)",
"def test_remove_tag_from_derived_metric(self):\n pass",
"def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])",
"def bulk_delete(self, **kwargs: Any) -> Response:\n tags = kwargs[\"rison\"]\n try:\n DeleteTagsCommand(tags).run()\n return self.response(200, message=f\"Deleted {len(tags)} tags\")\n except TagNotFoundError:\n return self.response_404()\n except TagInvalidError as ex:\n return self.response(422, message=f\"Invalid tag parameters: {tags}. {ex}\")\n except TagDeleteFailedError as ex:\n return self.response_422(message=str(ex))",
"def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError",
"def stop_untagged_instances(untagged_instance_ids, temporary_user):\n try:\n logger.info('Stopping the untagged instances : ')\n logger.info(untagged_instance_ids)\n temporary_user.stop_instances(InstanceIds=untagged_instance_ids)\n except Exception as error:\n logger.info('The instances failed to stop with the following error : {}'.format(error))",
"def delete(self, uuid):\n\n\t\treturn self._delete(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def _delete_tag_request():\n key = helpers.get('Tag.1.Key')\n resource_id = helpers.get('ResourceId.1')\n\n if resource_id in current_app.config['RESOURCE_TYPE_MAP']:\n resource_type = current_app.config['RESOURCE_TYPE_MAP'][resource_id]\n else:\n errors.invalid_request(\n str(resource_id) + \" not found in configuration\")\n\n args = {\n 'command': 'deleteTags',\n 'resourceids': resource_id,\n 'resourcetype': resource_type,\n 'tags[0].key': key\n }\n\n response = requester.make_request_async(args)\n\n return response",
"def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)",
"def remove_tag(self, index):\n\n model_index = self.GetItemData(index)\n self.DeleteItem(model_index)\n del self._clientData[model_index]",
"def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)",
"def remove_tag(self, tag):\n cp = self.copy()\n cp.tags.remove(tag)\n return cp",
"def untagAll(self, authenticationToken, guid):\r\n pass",
"def tags_clear(self, item, tags):\n self._createTagAction(item, \"tags_clear\", tags)",
"def delete_asg_tags(asg_name, key):\n logger.info('Deleting tag from asg key: {}...'.format(key))\n if not app_config['DRY_RUN']:\n response = client.delete_tags(\n Tags=[\n {\n 'Key': key,\n 'ResourceId': asg_name,\n 'ResourceType': 'auto-scaling-group'\n },\n ]\n )\n if response['ResponseMetadata']['HTTPStatusCode'] != requests.codes.ok:\n logger.info('AWS asg tag modification operation did not succeed. Exiting.')\n raise Exception('AWS asg tag modification operation did not succeed. Exiting.')\n else:\n logger.info('Skipping asg tag modification due to dry run flag set')\n response = {'message': 'dry run only'}\n return response",
"def remove_tags(self, tags):\n for task in self._tasks:\n task.remove_tags(tags)\n\n return self",
"def remove():\n pass",
"async def remove(self, ctx, name: str):\n if self.config.hexists(\"config:tags:global\", name):\n if not checks.sudo_check(ctx.message):\n await ctx.send(\"Only {} can remove global tags.\".format(self.bot.owner))\n else:\n self.config.hdel(\"config:tags:global\", name)\n\n elif self.config.hexists(\"chan:{}:tags\".format(ctx.message.channel.id), name):\n self.config.hdel(\"chan:{}:tags\".format(ctx.message.channel.id), name)\n # Don't allow ability to remove global tags\n\n elif self.config.hexists(\"guild:{}:tags\".format(ctx.message.guild.id), name):\n self.config.hdel(\"guild:{}:tags\".format(ctx.message.guild.id, name))\n\n await ctx.send(\"Tag {} removed.\".format(name))",
"def untag(self, uuid, tags=None):\n if isinstance(tags, basestring):\n tags = [tags]\n\n self._backend.untag(uuid, tags)",
"def removeEmbedded(self, tag):\n self.embeddedTags = self.embeddedTags[:-1]",
"def cli(ctx):\n stopped = click.style(\"Stopped\", fg=\"red\")\n removed = click.style(\"Removed\", fg=\"blue\")\n for container in ctx.docker.get_containers():\n name = container.hostname\n node_name = ''.join([i for i in name if not i.isdigit()])\n image_name = container.dictionary['Config']['Image']\n if node_name in TO_KILL:\n container.stop(timeout=0)\n else:\n container.stop(timeout=5)\n # container.execute(\"poweroff\", \"root\", \"/\", False)\n # container.wait()\n ctx.log(\"Container %s --> %s\" % (name, stopped))\n container.remove(v=False, link=False, force=True)\n ctx.log(\"Container %s --> %s\" % (name, removed))\n ctx.state['containers'].remove(container.short_id)\n ctx.state.fast_dump()\n # remove untagged image\n if not image_name.startswith(ctx.prefix):\n ctx.docker.remove_image(image_name, force=True)\n ctx.docker.remove_network()",
"def tags_delete(tag_id):\n\n tags = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tags)\n db.session.commit()\n\n flash(f\"'{tags.name}' tag is deleted.\")\n\n return redirect(\"/tags\")",
"def completely_remove_tag(self, owner_userid, tag_name):\n\t\tself.log.debug(\"completely_remove_tag()\")\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\n\t\td = self.app.db.runOperation(\"SELECT zoto_remove_tag_from_all_user_images(%s, %s)\",\n\t\t\t\t (owner_userid, tag_name))\n\t\td.addCallback(lambda _: (0, \"success\"))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\treturn d",
"def quit(user, tag):\n def work():\n member = Member.get(user)\n member.remove_tag(tag)\n member.put()\n db.run_in_transaction(work)",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)",
"def remove_node_by_tagname(nodes: List, tagname: str):\n\n for remove in [node for node in nodes if node.tagname == tagname]:\n nodes.remove(remove)",
"def replace_tags(self, photo_id, tag_list):\n # get all the tags attached to the photo\n current_tags = self.db.make_query(\n '''\n select * from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n print(current_tags)\n\n # remove the current tags\n self.db.make_query(\n '''\n delete from photo_tag where photo_id = {}\n '''.format(photo_id)\n )\n\n for tag in tag_list:\n # add tags in the tag_list\n self.db.make_query(\n '''\n insert into photo_tag (photo_id, tag_name)\n values ({}, \"{}\")\n '''.format(photo_id, tag)\n )\n\n self.update_photo_count(tag)",
"def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)",
"def expungeTag(self, authenticationToken, guid):\r\n pass",
"def deleteTags(bufferNumber):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # define global variables\n global TAGS, TAGLINENUMBERS, BUFFERTICKS\n\n # try to delete the tags for the buffer {{{\n try:\n del TAGS[bufferNumber]\n del TAGLINENUMBERS[bufferNumber]\n del BUFFERTICKS[bufferNumber]\n except:\n pass\n # }}}\n # }}}",
"def delete_unused_tags( self, owner ):\n self.filter(\n owner = owner,\n documents__isnull = True).delete()",
"def test_networking_project_network_tag_delete(self):\n pass",
"def tags_remove(self, item_id, tags, **params):\n\n if isinstance(tags, basestring):\n tags = tags.split(',')\n\n self.queue('tags_remove', item_id=item_id, tags=tags, **params)",
"def remove(self, value):\n tags = self.__all_tags()\n if value in tags:\n tags.remove(value)\n self.__post_changes(tags)",
"def remove_tag(session, tagname, username='system_user'):\n session = validate_session(session)\n tag = tag_exists(session, tag_name=tagname)\n tagid= tag.id\n tag_stats = session.query(TagStats).\\\n filter(TagStats.tag_id == tag.id)\n if tag:\n try:\n tag_stats.delete()\n session.commit()\n session.delete(tag)\n session.commit()\n return(True, \"Tag %s was deleted\" % (tagname), tagid)\n except Exception as e:\n session.rollback()\n return(False, \"Tag %s does not exists\" % (tagname))",
"async def delete_concurrency_limit_by_tag(\n self,\n tag: str,\n ):\n try:\n await self._client.delete(\n f\"/concurrency_limits/tag/{tag}\",\n )\n except httpx.HTTPStatusError as e:\n if e.response.status_code == status.HTTP_404_NOT_FOUND:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise",
"def multi_untag_image(self, owner_userid, tag_userid, image_ids, tag_names):\n\t\towner_userid = validation.cast_integer(owner_userid, 'owner_userid')\n\t\tfor id in image_ids:\n\t\t\tid = validation.cast_integer(id, 'image_id')\n\t\t@stack\n\t\tdef delete_txn(txn, owner, tags, ids, tagger):\n\t\t\tfor tag in tags:\n\t\t\t\tid_list = []\n\t\t\t\tfor id in ids:\n\t\t\t\t\ttxn.execute(\"\"\"\n\t\t\t\t\t\tselect zoto_remove_user_image_tag(\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s,\n\t\t\t\t\t\t\t%s\n\t\t\t\t\t\t)\n\t\t\t\t\t\"\"\", (owner, id, tag, tagger))\n\n\t\treturn self.app.db.runInteraction(delete_txn, owner_userid, tag_names, image_ids, tag_userid)",
"def tag_updater(self, tags):\n for tag in tags:\n #check if the tag exists\n exists = False\n tag = self.tags.find_one({'TagName': tag})\n if tag is not None:\n self.tags.update_one({'TagName': tag}, {'$set': {'Count': tag['Count']+1}}) \n else:\n #insert new tag\n Id = self.id_generator(self.tags)\n self.tags.insert_one({\"Id\":Id, \"TagName\":tag, \"Count\":0})",
"def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True",
"def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)",
"def clean_tags(location, max_tags):\n git = '/usr/bin/git'\n ensure_dir(location)\n with utils.cd(location):\n cmd = [\n git,\n 'for-each-ref',\n '--sort=taggerdate',\n '--format=%(refname)',\n 'refs/tags'\n ]\n\n tags = subprocess.check_output(cmd).splitlines()\n old_tags = []\n while len(tags) > max_tags:\n tag = tags.pop(0)\n if tag.startswith('refs/tags/'):\n tag = tag[10:]\n\n # Don't delete tags that aren't ours\n if not tag.startswith(TAG_PREFIX):\n continue\n\n old_tags.append(tag)\n\n # if there aren't any old tags, bail early\n if len(old_tags) == 0:\n return\n\n cmd = [git, 'tag', '-d']\n cmd += old_tags\n subprocess.check_call(cmd)",
"def remove_tag(tag_id):\n tag = Tags.query.get(tag_id)\n db_session.delete(tag)\n db_session.commit()\n return 'Tag #%s (%s) has been deleted.' % (tag_id, tag.tag), 'success'",
"def tag_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def tearDown(self):\n Tag.objects.all().delete()\n super(TagTest, self).tearDown()",
"def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)",
"def replace_all_tags(tags):\n\twith postgres, postgres.cursor() as cur:\n\t\tcur.execute(\"truncate mustard.tags\");\n\t\tpsycopg2.extras.execute_values(cur,\n\t\t\t\"insert into mustard.tags (id, english_name, english_desc) values %s\",\n\t\t\ttags)\n\t\tcur.execute(\"update mustard.status set tags_updated = now()\")",
"def ex_delete_tags(self, node, tags):\n if not tags:\n return\n\n params = { 'Action': 'DeleteTags',\n 'ResourceId.0': node.id }\n for i, key in enumerate(tags):\n params['Tag.%d.Key' % i] = key\n params['Tag.%d.Value' % i] = tags[key]\n\n self.connection.request(self.path,\n params=params.copy()).object",
"def solr_delete(instances):\n __solr_prepare(instances)",
"def test_remove_defined_tag(self, test, object_storage):\n namespace_name, bucket_name = self._get_bucket_details(object_storage)\n session_factory = test.oci_session_factory()\n policy = test.load_policy(\n {\n \"name\": \"bucket-remove-tag\",\n \"resource\": \"oci.bucket\",\n \"filters\": [\n {\"type\": \"value\", \"key\": \"name\", \"value\": bucket_name},\n ],\n \"actions\": [\n {\n \"type\": \"remove-tag\",\n \"defined_tags\": [\"cloud-custodian-test.mark-for-resize\"],\n },\n ],\n },\n session_factory=session_factory,\n )\n policy.run()\n resource = self._fetch_bucket_validation_data(\n policy.resource_manager, namespace_name, bucket_name\n )\n test.assertEqual(resource[\"name\"], bucket_name)\n test.assertEqual(self.get_defined_tag_value(resource[\"defined_tags\"]), None)",
"def test_delete_instances(self, instances_count, create_instance):\n instance_name = generate_ids('instance').next()\n create_instance(instance_name, count=instances_count)",
"def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)",
"def delete_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n\n db.session.delete(tag)\n db.session.commit()\n\n return redirect('/tags')",
"def _drop_tags(target, *regexps):\n\n for tagname in list(target.keys()):\n for _ in (x for x in regexps if re.search(x, tagname)):\n try:\n del target[tagname]\n logger.debug('Drop tag {0}[{1}]'.format(\n type(target).__name__, tagname))\n except KeyError:\n pass\n break",
"def stop(self, aws_tags: List[Dict]) -> None:\n for instance_arn in self.tag_api.get_resources(\"ec2:instance\", aws_tags):\n instance_id = instance_arn.split(\"/\")[-1]\n try:\n if not self.asg.describe_auto_scaling_instances(\n InstanceIds=[instance_id]\n )[\"AutoScalingInstances\"]:\n self.ec2.stop_instances(InstanceIds=[instance_id])\n print(f\"Stop instances {instance_id}\")\n except ClientError as exc:\n ec2_exception(\"instance\", instance_id, exc)",
"def delete_tag(request):\n try:\n tags = request.POST.getlist('tag_id', 0)\n tag = Tag.objects.filter(pk__in=tags).delete()\n ActionLogger().log(request.user, \"deleted\", \"Knowledgebase Tag %s\" % tags)\n return format_ajax_response(True, \"Knoweldgebase tag deleted successfully.\")\n except Exception as ex:\n logger.error(\"Failed to delete_tag: %s\" % ex)\n return format_ajax_response(False, \"There was an error deleting the specified knowledgebase tag.\")",
"def delete_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass",
"def delete_tag(delete_timestamps):\n\n ctx = dash.callback_context\n triggered_id, triggered_prop, triggered_value = utils.ctx_triggered_info(ctx)\n\n # When the button is initially added, it fires a callback.\n # We want to prevent this callback from making changes to the update signal.\n if triggered_value is None:\n raise PreventUpdate\n\n # Unfortunately, we have to convert the stringified dict back to a dict.\n # Dash doesn't provide us any other method to see which element triggered the callback.\n # This isn't very elegant, but I don't see any other way to proceed.\n id_dict = utils.string_to_dict(triggered_id)\n tag_idx = id_dict[\"index\"]\n state.delete_tag(tag_idx)\n\n return constants.OK_SIGNAL",
"async def remove(self, container, uids):"
] |
[
"0.7179029",
"0.6874668",
"0.6855494",
"0.673707",
"0.6659385",
"0.66167855",
"0.6604312",
"0.65682214",
"0.6563147",
"0.6558112",
"0.6515623",
"0.65010685",
"0.64438564",
"0.6391957",
"0.63677007",
"0.63088256",
"0.62703615",
"0.62650335",
"0.62569135",
"0.6232759",
"0.6192985",
"0.6180688",
"0.61307186",
"0.61168253",
"0.6114244",
"0.6105553",
"0.60734296",
"0.60734296",
"0.60640854",
"0.6062538",
"0.60482657",
"0.6035056",
"0.6024435",
"0.60107064",
"0.600515",
"0.59961486",
"0.599396",
"0.596841",
"0.59660447",
"0.594787",
"0.59346247",
"0.5899294",
"0.58990306",
"0.5889299",
"0.58853096",
"0.5879939",
"0.58636147",
"0.58582914",
"0.5852979",
"0.5842619",
"0.5835115",
"0.5803114",
"0.58022004",
"0.5802088",
"0.5794086",
"0.57680655",
"0.5763649",
"0.5756029",
"0.57513857",
"0.5739661",
"0.57374775",
"0.57229984",
"0.56833833",
"0.56763995",
"0.5674104",
"0.56644356",
"0.5662845",
"0.5656565",
"0.5649344",
"0.56482565",
"0.56461346",
"0.564525",
"0.5622778",
"0.56216794",
"0.56135905",
"0.5612889",
"0.5612194",
"0.5605596",
"0.56040543",
"0.55965036",
"0.55954784",
"0.5591793",
"0.55826396",
"0.55804217",
"0.5563078",
"0.55601656",
"0.5559777",
"0.55558515",
"0.5552118",
"0.5547926",
"0.5542182",
"0.5537875",
"0.55295",
"0.5528774",
"0.5526336",
"0.5525655",
"0.5521821",
"0.5519836",
"0.5510515",
"0.5510061",
"0.5507811"
] |
0.0
|
-1
|
The instance must be in the running state when you call this operation. > The available database versions depend on the storage engine used by the instance. For more information, see [Upgrades of MongoDB major versions](~~398673~~). You can also call the [DescribeAvailableEngineVersion](~~141355~~) operation to query the available database versions. > You cannot downgrade the MongoDB version of an instance after you upgrade it. > The instance is automatically restarted two to three times during the upgrade process. Make sure that you upgrade the instance during off-peak hours.
|
def upgrade_dbinstance_engine_version_with_options(
self,
request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.engine_version):
query['EngineVersion'] = request.engine_version
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UpgradeDBInstanceEngineVersion',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),
self.call_api(params, req, runtime)
)
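As a quick illustration of how the generated client method above might be invoked, the following is a minimal Python sketch. The import paths, client construction, endpoint, and instance ID are assumptions and are not part of this record; the method name and the request fields (`dbinstance_id`, `engine_version`) come from the code above.

```python
# Minimal usage sketch; package names, endpoint, and credentials are assumptions.
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Assumed client construction; replace the placeholders with real credentials.
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='mongodb.aliyuncs.com',
)
client = DdsClient(config)

# Upgrade the engine version of a running instance. The upgrade cannot be rolled
# back and the instance restarts two to three times while it runs.
request = dds_20151201_models.UpgradeDBInstanceEngineVersionRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # hypothetical instance ID
    engine_version='5.0',
)
response = client.upgrade_dbinstance_engine_version(request)
print(response.body)
```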
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def db_version(engine):\n return IMPL.db_version(engine)",
"def db_version():\n return IMPL.db_version()",
"def mmo_mongo_version(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"version\"]",
"def version(self):\r\n print migration.db_version()",
"def upgrade_to_1():\n config.db.singletons.insert_one({'_id': 'version', 'database': 1})",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")",
"def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db",
"def inspect(self):\n self.db.connect()\n result = None\n try:\n jambi_versions = JambiModel.select().limit(1)\n if any(jambi_versions):\n field = jambi_versions[0].ref\n try:\n result = int(field)\n except ValueError:\n self.logger.error('Database current version \"{}\" is not '\n 'valid'.format(jambi_versions[0].ref))\n self.logger.info('Your database is at version '\n '{}'.format(field))\n else:\n self.logger.info('This database hasn\\'t been migrated yet')\n except ProgrammingError:\n self.logger.info('Run \"init\" to create a jambi version table')\n finally:\n self.db.close()\n return result",
"def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def Mongodb_Connection():\r\n \r\n client = pymongo.MongoClient(\"localhost\", 27017)\r\n db = client.test\r\n\r\n\r\n if db.Transaction.estimated_document_count() != 0:\r\n \"\"\"\r\n To make a new test, the database is cleared if not empty\r\n \"\"\"\r\n \r\n db.command(\"dropDatabase\")\r\n \r\n return db",
"def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_to_18():\n\n gear_doc = config.db.singletons.find_one({\"_id\": \"gears\"})\n\n if gear_doc is not None:\n gear_list = gear_doc.get('gear_list', [])\n for gear in gear_list:\n try:\n gears.upsert_gear(gear)\n except Exception as e:\n logging.error(\"\")\n logging.error(\"Error upgrading gear:\")\n logging.error(type(e))\n logging.error(\"Gear will not be retained. Document follows:\")\n logging.error(gear)\n logging.error(\"\")\n\n config.db.singletons.remove({\"_id\": \"gears\"})",
"def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'",
"def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:Saran1!@ds113736.mlab.com:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db",
"def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None",
"def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object",
"def GetMigrationStatus(self, instance):\n raise HypervisorError(\"Migration not supported by the chroot hypervisor\")",
"def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)",
"def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def environment_needs_upgrade(self, db):\n\n return False",
"def engine_version(self) -> Optional[str]:\n return pulumi.get(self, \"engine_version\")",
"def detect_version(conn):\n try:\n with conn.begin():\n db_version = conn.scalar(text(\n \"SELECT version FROM configuration\"))\n except exc.ProgrammingError:\n with conn.begin():\n packages_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'packages'\")))\n with conn.begin():\n statistics_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_views \"\n \"WHERE schemaname = 'public' AND viewname = 'statistics'\")))\n with conn.begin():\n files_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'files'\")))\n if not packages_exists:\n # Database is uninitialized\n return None\n elif not files_exists:\n # Database is too ancient to upgrade\n raise RuntimeError(\"Database version older than 0.4; cannot upgrade\")\n elif not statistics_exists:\n return \"0.4\"\n else:\n return \"0.5\"\n else:\n return db_version",
"def test_upgrade_with_auto_upgrade_latest_engine_enabled():",
"def engine_version(self) -> str:\n return pulumi.get(self, \"engine_version\")",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def upgrade_environment(self, db):\n\n pass",
"def connect_to_eeg_db():\n logger.info(\"Connecting to MongoDB ...\")\n con = pymongo.MongoClient()\n db = con.eeg_db\n eeg = db.eeg\n logger.info(\"Connected and db opened.\")\n return con, db, eeg",
"def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")",
"def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def environment_needs_upgrade(self, db):\n if db is not None:\n db.commit()\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n try:\n cursor.execute(\"SELECT * FROM default_image\")\n return False\n except:\n return True",
"async def _get_pymongo_instance(app: web.Application , url) -> None:\n try:\n log.info(f'Getting pymongo instance')\n mongo_instance = dict()\n _cli = MongoClient(url)\n mongo_instance['client'] = _cli\n mongo_instance['db'] = _cli['versiondb']\n app['mongo'] = mongo_instance\n await asyncio.sleep(1)\n\n except Exception as e:\n\n log.error(f'_get_pymongo_instance {e}')\n raise e",
"def test_using(self):\n\n class Number2(Document):\n n = IntField()\n\n Number2.drop_collection()\n with switch_db(Number2, \"test2\") as Number2:\n Number2.drop_collection()\n\n for i in range(1, 10):\n t = Number2(n=i)\n t.switch_db(\"test2\")\n t.save()\n\n assert len(Number2.objects.using(\"test2\")) == 9",
"def is_mongod_running(self):\r\n \r\n try:\r\n _connect_to_mongo_port(int(self.port))\r\n return True\r\n except OSError:\r\n return False\r\n except Exception:\r\n return False",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = rds.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_dbinstances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'available'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()",
"def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]",
"def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)",
"def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # 
http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now 
status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, 
tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]",
"def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False",
"def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def initdb(self):\n logger.info(\"Initializing database\")\n self.instances.drop()\n self.instances.create_index([('class_id', pymongo.HASHED)])\n # Creates a unique index\n self.instances.create_index(\n 'name',\n unique=True,\n partialFilterExpression={'deleted' : False}\n )\n start_time = time.time()\n timeout = 60 * 5\n while not self.axops_client.ping():\n if time.time() - start_time > timeout:\n raise AXTimeoutException(\"Timed out ({}s) waiting for axops availability\".format(timeout))\n time.sleep(3)\n\n for fix_doc in self.axdb_client.get_fixture_instances():\n instance = FixtureInstance.deserialize_axdbdoc(fix_doc)\n self.instances.insert_one(instance.mongodoc())\n\n logger.info(\"Database initialized\")",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")",
"def db_instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_instance_id\")",
"def connection():\n from mongoengine import connect\n\n connect(host=\"mongomock://localhost\", alias=\"default\")",
"def num_databases ():\n return len(_dbobjects)",
"def get_db(db_config):\n hosts=[]\n db_uri=''\n\n for host in db_config['hosts']:\n hosts.append( host['host'] + \":\" + str(host['port'] ))\n\n db_uri = \"mongodb://\" + \\\n ','.join(hosts) + \\\n \"/?authSource=\" + db_config['auth_source'] + \\\n \"&replicaSet=\" + db_config['replica_set']\n\n\n db = MongoClient(\n db_uri,\n username = db_config['username'],\n password = db_config['password'],\n authMechanism = db_config['auth_mechanism'],\n ssl = (True if db_config['use_ssl'] else False),\n ssl_certfile = (db_config['ssl_certificate_file'] if db_config['ssl_certificate_file'] else None),\n ssl_ca_certs = (db_config['ssl_ca_file'] if db_config['ssl_ca_file'] else None),\n ssl_cert_reqs = (ssl.CERT_OPTIONAL if db_config['use_ssl'] else None),\n maxPoolSize = 5,\n wtimeout = 2500\n )[db_config['db_name']]\n \n return db",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongodb_connect():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n db = client.tweetbase\n return db",
"def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db",
"def connectToMongo():\n mongodb_uri = os.environ.get(\"DATABASE_URI\", \"\") or \"mongodb://localhost:27017\" \n client = pymongo.MongoClient(mongodb_uri)\n return client.insights_db # Declare the DB",
"def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db",
"def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()",
"def get_dbserver(self):\n servers = self.get_dbservers()\n assert servers, \"starter: don't have instances!\"\n return servers[0]",
"def __init__(self):\n self.client = MongoClient('localhost', 27017)#27017\n self.db = self.client.greency_db\n self.collection = self.db.inventory",
"def upgrade_to_22():\n\n logging.info('Upgrade v22, phase 1 of 3, upgrading gears...')\n\n # Add timestamps to gears.\n for gear in config.db.gears.find({}):\n now = datetime.datetime.utcnow()\n\n gear['created'] = now\n gear['modified'] = now\n\n config.db.gears.update({'_id': gear['_id']}, gear)\n\n # Ensure there cannot possibly be two gears of the same name with the same timestamp.\n # Plus or minus monotonic time.\n # A very silly solution, but we only ever need to do this once, on a double-digit number of documents.\n # Not worth the effort to, eg, rewind time and do math.\n time.sleep(1)\n logging.info(' Updated gear ' + str(gear['_id']) + ' ...')\n sys.stdout.flush()\n\n\n logging.info('Upgrade v22, phase 2 of 3, upgrading jobs...')\n\n # Now that they're updated, fetch all gears and hold them in memory.\n # This prevents extra database queries during the job upgrade.\n\n all_gears = list(config.db.gears.find({}))\n gears_map = { }\n\n for gear in all_gears:\n gear_name = gear['gear']['name']\n\n gears_map[gear_name] = gear\n\n # A dummy gear for missing refs\n dummy_gear = {\n 'category' : 'converter',\n 'gear' : {\n 'inputs' : {\n 'do-not-use' : {\n 'base' : 'file'\n }\n },\n 'maintainer' : 'Noone <nobody@example.example>',\n 'description' : 'This gear or job was referenced before gear versioning. Version information is not available for this gear.',\n 'license' : 'BSD-2-Clause',\n 'author' : 'Noone',\n 'url' : 'https://example.example',\n 'label' : 'Deprecated Gear',\n 'flywheel' : '0',\n 'source' : 'https://example.example',\n 'version' : '0.0.0',\n 'custom' : {\n 'flywheel': {\n 'invalid': True\n }\n },\n 'config' : {},\n 'name' : 'deprecated-gear'\n },\n 'exchange' : {\n 'git-commit' : '0000000000000000000000000000000000000000',\n 'rootfs-hash' : 'sha384:000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n 'rootfs-url' : 'https://example.example/does-not-exist.tgz'\n }\n }\n\n maximum = config.db.jobs.count()\n upgraded = 0\n\n # Blanket-assume gears were the latest in the DB pre-gear versioning.\n for job in config.db.jobs.find({}):\n\n # Look up latest gear by name, lose job name key\n gear_name = job['name']\n gear = gears_map.get(gear_name)\n\n if gear is None:\n logging.info('Job doc ' + str(job['_id']) + ' could not find gear ' + gear_name + ', creating...')\n\n new_gear = copy.deepcopy(dummy_gear)\n new_gear['gear']['name'] = gear_name\n\n # Save new gear, store id in memory\n resp = config.db.gears.insert_one(new_gear)\n new_id = resp.inserted_id\n new_gear['_id'] = str(new_id)\n\n # Insert gear into memory map\n gears_map[gear_name] = new_gear\n\n logging.info('Created gear ' + gear_name + ' with id ' + str(new_id) + '. 
Future jobs with this gear name with not alert.')\n\n gear = new_gear\n\n if gear is None:\n raise Exception(\"We don't understand python scopes ;( ;(\")\n\n # Store gear ID\n job.pop('name', None)\n job['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.jobs.update({'_id': job['_id']}, job)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' jobs of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, phase 3 of 3, upgrading batch...')\n\n maximum = config.db.batch.count()\n upgraded = 0\n\n for batch in config.db.batch.find({}):\n\n # Look up latest gear by name, lose job name key\n gear = gears.get_gear_by_name(batch['gear'])\n batch.pop('gear', None)\n\n # Store gear ID\n batch['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.batch.update({'_id': batch['_id']}, batch)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' batch of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, complete.')",
"def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db",
"def db_instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"db_instance_id\")",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def engine_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_version\")",
"def version(verbose: bool) -> None:\n print(Fore.BLUE + '==' * 15)\n print(\n Fore.YELLOW + 'Raven ' + Fore.CYAN + '0.1-dev'\n )\n print(Fore.BLUE + '==' * 15)\n if verbose:\n print(f'[DB]: {db.engine}')\n print(Style.RESET_ALL)",
"def database_exist(database_name):\n with MongoDBConnection() as mongo:\n database_list = mongo.connection.list_database_names()\n\n exist_flag = True\n if database_name not in database_list:\n print(f'Database {database_name} not found.')\n exist_flag = False\n\n return exist_flag",
"def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db",
"def db_version():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/version', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)",
"def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))",
"def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")",
"def __update_version(self):\r\n\r\n db_version = self.__get_db_version_int()\r\n if db_version == SCHEMA_VERSION:\r\n return\r\n\r\n #\r\n # Define functions for upgrading between schema versions\r\n #\r\n def update_2xto30():\r\n \"\"\"Incremental update of database from Freeseer 2.x and older to 3.0\r\n\r\n SCHEMA_VERSION is 300\r\n \"\"\"\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n def update_30to31():\r\n \"\"\"Performs incremental update of database from 3.0 and older to 3.1.\"\"\"\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old')\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_310)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time, Time, Time\r\n FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n #\r\n # Perform the upgrade\r\n #\r\n updaters = [update_2xto30, update_30to31]\r\n for updater in updaters:\r\n updater()\r\n\r\n QtSql.QSqlQuery('PRAGMA user_version = %i' % SCHEMA_VERSION)\r\n log.info('Upgraded presentations database from version {} to {}'.format(db_version, SCHEMA_VERSION))",
"def get_db_info(self):\n db_info = {}\n db_info[\"Mongo Server Info\"] = self.db_client.server_info()\n return db_info",
"def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version",
"def upgrade_to_23():\n\n db_config = config.db.singletons.find_one({'_id': 'config'})\n if db_config:\n auth_config = db_config.get('auth', {})\n if auth_config.get('auth_type'):\n auth_type = auth_config.pop('auth_type')\n config.db.singletons.update_one({'_id': 'config'}, {'$set': {'auth': {auth_type: auth_config}}})",
"def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version",
"def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'",
"def upgrade_to_9():\n\n config.db.acquisitions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})\n config.db.sessions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})"
] |
[
"0.60887057",
"0.58272535",
"0.57711166",
"0.56431997",
"0.55289644",
"0.5520775",
"0.54857856",
"0.5430625",
"0.5372646",
"0.5360392",
"0.52892214",
"0.5286582",
"0.5278977",
"0.52780265",
"0.52661526",
"0.5259783",
"0.5211577",
"0.51565385",
"0.514894",
"0.5146001",
"0.5137969",
"0.5118388",
"0.5096248",
"0.5089838",
"0.5089794",
"0.5075383",
"0.50698084",
"0.5068324",
"0.50616986",
"0.504583",
"0.50418276",
"0.5007424",
"0.49795914",
"0.49715576",
"0.49700013",
"0.49494177",
"0.49456763",
"0.49426377",
"0.49322644",
"0.49283183",
"0.49154904",
"0.49143952",
"0.4905709",
"0.49052295",
"0.49037567",
"0.49018228",
"0.48875242",
"0.48735008",
"0.48676628",
"0.48582584",
"0.48488858",
"0.484391",
"0.48421982",
"0.48404807",
"0.48391417",
"0.483753",
"0.483753",
"0.483753",
"0.48347166",
"0.48345494",
"0.48327628",
"0.48327628",
"0.48327628",
"0.48327628",
"0.48327628",
"0.48291487",
"0.48224378",
"0.48143822",
"0.48119178",
"0.48097154",
"0.48012707",
"0.48006055",
"0.4800156",
"0.4792472",
"0.47856984",
"0.47824818",
"0.4773425",
"0.4765348",
"0.47639638",
"0.4763806",
"0.47618374",
"0.47577664",
"0.47518453",
"0.47514722",
"0.4750991",
"0.47337508",
"0.47333506",
"0.47284353",
"0.47270066",
"0.47110906",
"0.4709931",
"0.47097647",
"0.47029164",
"0.47015804",
"0.47008976",
"0.46883383",
"0.46857816",
"0.46663383",
"0.46648097",
"0.46640822"
] |
0.49824673
|
32
|
The instance must be in the running state when you call this operation. > The available database versions depend on the storage engine used by the instance. For more information, see [Upgrades of MongoDB major versions](~~398673~~). You can also call the [DescribeAvailableEngineVersion](~~141355~~) operation to query the available database versions. > You cannot downgrade the MongoDB version of an instance after you upgrade it. > The instance is automatically restarted two to three times during the upgrade process. Make sure that you upgrade the instance during off-peak hours.
|
async def upgrade_dbinstance_engine_version_with_options_async(
self,
request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.engine_version):
query['EngineVersion'] = request.engine_version
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UpgradeDBInstanceEngineVersion',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),
await self.call_api_async(params, req, runtime)
)
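
Below is a minimal usage sketch for the operation shown above. It is not part of the source: the client import path (alibabacloud_dds20151201.client.Client), the endpoint value, and the environment-variable credential handling are assumptions that may need to be adapted to your SDK version and region.

import os

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_tea_openapi import models as open_api_models


def upgrade_engine_version_example(instance_id: str, target_version: str) -> None:
    # Build the client; the endpoint and credential source are illustrative assumptions.
    config = open_api_models.Config(
        access_key_id=os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'],
        access_key_secret=os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'],
        endpoint='mongodb.aliyuncs.com',
    )
    client = DdsClient(config)

    # Only the instance ID and the target engine version are set here;
    # the remaining request fields handled by the SDK wrapper are optional.
    request = dds_20151201_models.UpgradeDBInstanceEngineVersionRequest(
        dbinstance_id=instance_id,
        engine_version=target_version,
    )
    response = client.upgrade_dbinstance_engine_version(request)
    print(response.body)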
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def db_version(engine):\n return IMPL.db_version(engine)",
"def db_version():\n return IMPL.db_version()",
"def mmo_mongo_version(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"version\"]",
"def version(self):\r\n print migration.db_version()",
"def upgrade_to_1():\n config.db.singletons.insert_one({'_id': 'version', 'database': 1})",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db",
"def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db",
"def inspect(self):\n self.db.connect()\n result = None\n try:\n jambi_versions = JambiModel.select().limit(1)\n if any(jambi_versions):\n field = jambi_versions[0].ref\n try:\n result = int(field)\n except ValueError:\n self.logger.error('Database current version \"{}\" is not '\n 'valid'.format(jambi_versions[0].ref))\n self.logger.info('Your database is at version '\n '{}'.format(field))\n else:\n self.logger.info('This database hasn\\'t been migrated yet')\n except ProgrammingError:\n self.logger.info('Run \"init\" to create a jambi version table')\n finally:\n self.db.close()\n return result",
"def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def Mongodb_Connection():\r\n \r\n client = pymongo.MongoClient(\"localhost\", 27017)\r\n db = client.test\r\n\r\n\r\n if db.Transaction.estimated_document_count() != 0:\r\n \"\"\"\r\n To make a new test, the database is cleared if not empty\r\n \"\"\"\r\n \r\n db.command(\"dropDatabase\")\r\n \r\n return db",
"def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_to_18():\n\n gear_doc = config.db.singletons.find_one({\"_id\": \"gears\"})\n\n if gear_doc is not None:\n gear_list = gear_doc.get('gear_list', [])\n for gear in gear_list:\n try:\n gears.upsert_gear(gear)\n except Exception as e:\n logging.error(\"\")\n logging.error(\"Error upgrading gear:\")\n logging.error(type(e))\n logging.error(\"Gear will not be retained. Document follows:\")\n logging.error(gear)\n logging.error(\"\")\n\n config.db.singletons.remove({\"_id\": \"gears\"})",
"def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:Saran1!@ds113736.mlab.com:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db",
"def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'",
"def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None",
"def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object",
"def GetMigrationStatus(self, instance):\n raise HypervisorError(\"Migration not supported by the chroot hypervisor\")",
"def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)",
"def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def environment_needs_upgrade(self, db):\n\n return False",
"def engine_version(self) -> Optional[str]:\n return pulumi.get(self, \"engine_version\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def detect_version(conn):\n try:\n with conn.begin():\n db_version = conn.scalar(text(\n \"SELECT version FROM configuration\"))\n except exc.ProgrammingError:\n with conn.begin():\n packages_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'packages'\")))\n with conn.begin():\n statistics_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_views \"\n \"WHERE schemaname = 'public' AND viewname = 'statistics'\")))\n with conn.begin():\n files_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'files'\")))\n if not packages_exists:\n # Database is uninitialized\n return None\n elif not files_exists:\n # Database is too ancient to upgrade\n raise RuntimeError(\"Database version older than 0.4; cannot upgrade\")\n elif not statistics_exists:\n return \"0.4\"\n else:\n return \"0.5\"\n else:\n return db_version",
"def test_upgrade_with_auto_upgrade_latest_engine_enabled():",
"def engine_version(self) -> str:\n return pulumi.get(self, \"engine_version\")",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def upgrade_environment(self, db):\n\n pass",
"def connect_to_eeg_db():\n logger.info(\"Connecting to MongoDB ...\")\n con = pymongo.MongoClient()\n db = con.eeg_db\n eeg = db.eeg\n logger.info(\"Connected and db opened.\")\n return con, db, eeg",
"def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False",
"def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")",
"def environment_needs_upgrade(self, db):\n if db is not None:\n db.commit()\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n try:\n cursor.execute(\"SELECT * FROM default_image\")\n return False\n except:\n return True",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"async def _get_pymongo_instance(app: web.Application , url) -> None:\n try:\n log.info(f'Getting pymongo instance')\n mongo_instance = dict()\n _cli = MongoClient(url)\n mongo_instance['client'] = _cli\n mongo_instance['db'] = _cli['versiondb']\n app['mongo'] = mongo_instance\n await asyncio.sleep(1)\n\n except Exception as e:\n\n log.error(f'_get_pymongo_instance {e}')\n raise e",
"def test_using(self):\n\n class Number2(Document):\n n = IntField()\n\n Number2.drop_collection()\n with switch_db(Number2, \"test2\") as Number2:\n Number2.drop_collection()\n\n for i in range(1, 10):\n t = Number2(n=i)\n t.switch_db(\"test2\")\n t.save()\n\n assert len(Number2.objects.using(\"test2\")) == 9",
"def is_mongod_running(self):\r\n \r\n try:\r\n _connect_to_mongo_port(int(self.port))\r\n return True\r\n except OSError:\r\n return False\r\n except Exception:\r\n return False",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = rds.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_dbinstances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'available'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()",
"def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]",
"def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)",
"def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # 
http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now 
status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, 
tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)",
"def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]",
"def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False",
"def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def initdb(self):\n logger.info(\"Initializing database\")\n self.instances.drop()\n self.instances.create_index([('class_id', pymongo.HASHED)])\n # Creates a unique index\n self.instances.create_index(\n 'name',\n unique=True,\n partialFilterExpression={'deleted' : False}\n )\n start_time = time.time()\n timeout = 60 * 5\n while not self.axops_client.ping():\n if time.time() - start_time > timeout:\n raise AXTimeoutException(\"Timed out ({}s) waiting for axops availability\".format(timeout))\n time.sleep(3)\n\n for fix_doc in self.axdb_client.get_fixture_instances():\n instance = FixtureInstance.deserialize_axdbdoc(fix_doc)\n self.instances.insert_one(instance.mongodoc())\n\n logger.info(\"Database initialized\")",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")",
"def db_instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_instance_id\")",
"def connection():\n from mongoengine import connect\n\n connect(host=\"mongomock://localhost\", alias=\"default\")",
"def num_databases ():\n return len(_dbobjects)",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def get_db(db_config):\n hosts=[]\n db_uri=''\n\n for host in db_config['hosts']:\n hosts.append( host['host'] + \":\" + str(host['port'] ))\n\n db_uri = \"mongodb://\" + \\\n ','.join(hosts) + \\\n \"/?authSource=\" + db_config['auth_source'] + \\\n \"&replicaSet=\" + db_config['replica_set']\n\n\n db = MongoClient(\n db_uri,\n username = db_config['username'],\n password = db_config['password'],\n authMechanism = db_config['auth_mechanism'],\n ssl = (True if db_config['use_ssl'] else False),\n ssl_certfile = (db_config['ssl_certificate_file'] if db_config['ssl_certificate_file'] else None),\n ssl_ca_certs = (db_config['ssl_ca_file'] if db_config['ssl_ca_file'] else None),\n ssl_cert_reqs = (ssl.CERT_OPTIONAL if db_config['use_ssl'] else None),\n maxPoolSize = 5,\n wtimeout = 2500\n )[db_config['db_name']]\n \n return db",
"def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongodb_connect():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n db = client.tweetbase\n return db",
"def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db",
"def connectToMongo():\n mongodb_uri = os.environ.get(\"DATABASE_URI\", \"\") or \"mongodb://localhost:27017\" \n client = pymongo.MongoClient(mongodb_uri)\n return client.insights_db # Declare the DB",
"def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db",
"def get_dbserver(self):\n servers = self.get_dbservers()\n assert servers, \"starter: don't have instances!\"\n return servers[0]",
"def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()",
"def __init__(self):\n self.client = MongoClient('localhost', 27017)#27017\n self.db = self.client.greency_db\n self.collection = self.db.inventory",
"def db_instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"db_instance_id\")",
"def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db",
"def upgrade_to_22():\n\n logging.info('Upgrade v22, phase 1 of 3, upgrading gears...')\n\n # Add timestamps to gears.\n for gear in config.db.gears.find({}):\n now = datetime.datetime.utcnow()\n\n gear['created'] = now\n gear['modified'] = now\n\n config.db.gears.update({'_id': gear['_id']}, gear)\n\n # Ensure there cannot possibly be two gears of the same name with the same timestamp.\n # Plus or minus monotonic time.\n # A very silly solution, but we only ever need to do this once, on a double-digit number of documents.\n # Not worth the effort to, eg, rewind time and do math.\n time.sleep(1)\n logging.info(' Updated gear ' + str(gear['_id']) + ' ...')\n sys.stdout.flush()\n\n\n logging.info('Upgrade v22, phase 2 of 3, upgrading jobs...')\n\n # Now that they're updated, fetch all gears and hold them in memory.\n # This prevents extra database queries during the job upgrade.\n\n all_gears = list(config.db.gears.find({}))\n gears_map = { }\n\n for gear in all_gears:\n gear_name = gear['gear']['name']\n\n gears_map[gear_name] = gear\n\n # A dummy gear for missing refs\n dummy_gear = {\n 'category' : 'converter',\n 'gear' : {\n 'inputs' : {\n 'do-not-use' : {\n 'base' : 'file'\n }\n },\n 'maintainer' : 'Noone <nobody@example.example>',\n 'description' : 'This gear or job was referenced before gear versioning. Version information is not available for this gear.',\n 'license' : 'BSD-2-Clause',\n 'author' : 'Noone',\n 'url' : 'https://example.example',\n 'label' : 'Deprecated Gear',\n 'flywheel' : '0',\n 'source' : 'https://example.example',\n 'version' : '0.0.0',\n 'custom' : {\n 'flywheel': {\n 'invalid': True\n }\n },\n 'config' : {},\n 'name' : 'deprecated-gear'\n },\n 'exchange' : {\n 'git-commit' : '0000000000000000000000000000000000000000',\n 'rootfs-hash' : 'sha384:000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n 'rootfs-url' : 'https://example.example/does-not-exist.tgz'\n }\n }\n\n maximum = config.db.jobs.count()\n upgraded = 0\n\n # Blanket-assume gears were the latest in the DB pre-gear versioning.\n for job in config.db.jobs.find({}):\n\n # Look up latest gear by name, lose job name key\n gear_name = job['name']\n gear = gears_map.get(gear_name)\n\n if gear is None:\n logging.info('Job doc ' + str(job['_id']) + ' could not find gear ' + gear_name + ', creating...')\n\n new_gear = copy.deepcopy(dummy_gear)\n new_gear['gear']['name'] = gear_name\n\n # Save new gear, store id in memory\n resp = config.db.gears.insert_one(new_gear)\n new_id = resp.inserted_id\n new_gear['_id'] = str(new_id)\n\n # Insert gear into memory map\n gears_map[gear_name] = new_gear\n\n logging.info('Created gear ' + gear_name + ' with id ' + str(new_id) + '. 
Future jobs with this gear name with not alert.')\n\n gear = new_gear\n\n if gear is None:\n raise Exception(\"We don't understand python scopes ;( ;(\")\n\n # Store gear ID\n job.pop('name', None)\n job['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.jobs.update({'_id': job['_id']}, job)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' jobs of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, phase 3 of 3, upgrading batch...')\n\n maximum = config.db.batch.count()\n upgraded = 0\n\n for batch in config.db.batch.find({}):\n\n # Look up latest gear by name, lose job name key\n gear = gears.get_gear_by_name(batch['gear'])\n batch.pop('gear', None)\n\n # Store gear ID\n batch['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.batch.update({'_id': batch['_id']}, batch)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' batch of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, complete.')",
"def engine_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_version\")",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def version(verbose: bool) -> None:\n print(Fore.BLUE + '==' * 15)\n print(\n Fore.YELLOW + 'Raven ' + Fore.CYAN + '0.1-dev'\n )\n print(Fore.BLUE + '==' * 15)\n if verbose:\n print(f'[DB]: {db.engine}')\n print(Style.RESET_ALL)",
"def database_exist(database_name):\n with MongoDBConnection() as mongo:\n database_list = mongo.connection.list_database_names()\n\n exist_flag = True\n if database_name not in database_list:\n print(f'Database {database_name} not found.')\n exist_flag = False\n\n return exist_flag",
"def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db",
"def db_version():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/version', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)",
"def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))",
"def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")",
"def get_db_info(self):\n db_info = {}\n db_info[\"Mongo Server Info\"] = self.db_client.server_info()\n return db_info",
"def __update_version(self):\r\n\r\n db_version = self.__get_db_version_int()\r\n if db_version == SCHEMA_VERSION:\r\n return\r\n\r\n #\r\n # Define functions for upgrading between schema versions\r\n #\r\n def update_2xto30():\r\n \"\"\"Incremental update of database from Freeseer 2.x and older to 3.0\r\n\r\n SCHEMA_VERSION is 300\r\n \"\"\"\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n def update_30to31():\r\n \"\"\"Performs incremental update of database from 3.0 and older to 3.1.\"\"\"\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old')\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_310)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time, Time, Time\r\n FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n #\r\n # Perform the upgrade\r\n #\r\n updaters = [update_2xto30, update_30to31]\r\n for updater in updaters:\r\n updater()\r\n\r\n QtSql.QSqlQuery('PRAGMA user_version = %i' % SCHEMA_VERSION)\r\n log.info('Upgraded presentations database from version {} to {}'.format(db_version, SCHEMA_VERSION))",
"def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version",
"def upgrade_to_23():\n\n db_config = config.db.singletons.find_one({'_id': 'config'})\n if db_config:\n auth_config = db_config.get('auth', {})\n if auth_config.get('auth_type'):\n auth_type = auth_config.pop('auth_type')\n config.db.singletons.update_one({'_id': 'config'}, {'$set': {'auth': {auth_type: auth_config}}})",
"def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version",
"def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'",
"def upgrade_to_9():\n\n config.db.acquisitions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})\n config.db.sessions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})"
] |
[
"0.6090064",
"0.58261514",
"0.57712024",
"0.5643622",
"0.5528457",
"0.55217195",
"0.5486818",
"0.54299986",
"0.5373658",
"0.53594774",
"0.52883273",
"0.52877307",
"0.52789235",
"0.5277654",
"0.52671677",
"0.52614397",
"0.52117026",
"0.51576954",
"0.5149471",
"0.51448",
"0.5138398",
"0.51192194",
"0.5095544",
"0.509113",
"0.5090594",
"0.5076884",
"0.5070981",
"0.5068047",
"0.5061071",
"0.50470567",
"0.5041852",
"0.5009157",
"0.49819115",
"0.49793664",
"0.49716318",
"0.49715245",
"0.4949127",
"0.49468297",
"0.494354",
"0.49323502",
"0.4929149",
"0.491661",
"0.49162745",
"0.4905803",
"0.49049786",
"0.4904943",
"0.4901661",
"0.4888061",
"0.4874457",
"0.48675877",
"0.48591995",
"0.48501697",
"0.48441422",
"0.48418206",
"0.48399162",
"0.48388088",
"0.48388088",
"0.48388088",
"0.48356122",
"0.48348704",
"0.48343194",
"0.48343194",
"0.48343194",
"0.48343194",
"0.48343194",
"0.48308718",
"0.4824732",
"0.48151627",
"0.48145443",
"0.48106912",
"0.4801606",
"0.4801468",
"0.4800981",
"0.4793366",
"0.47864214",
"0.47831044",
"0.4773887",
"0.47663268",
"0.47647443",
"0.47646576",
"0.47638246",
"0.475851",
"0.4753787",
"0.4752603",
"0.47505996",
"0.4734834",
"0.47337314",
"0.47303608",
"0.47277328",
"0.47118697",
"0.47103444",
"0.47102854",
"0.47035536",
"0.47023863",
"0.4701479",
"0.46889094",
"0.46852016",
"0.46665555",
"0.4664674",
"0.46629035"
] |
0.48418233
|
53
|
The instance must be in the running state when you call this operation. > The available database versions depend on the storage engine used by the instance. For more information, see [Upgrades of MongoDB major versions](~~398673~~). You can also call the [DescribeAvailableEngineVersion](~~141355~~) operation to query the available database versions. > You cannot downgrade the MongoDB version of an instance after you upgrade it. > The instance is automatically restarted two to three times during the upgrade process. Make sure that you upgrade the instance during off-peak hours.
|
def upgrade_dbinstance_engine_version(
self,
request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,
) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:
runtime = util_models.RuntimeOptions()
return self.upgrade_dbinstance_engine_version_with_options(request, runtime)
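
A short hedged note on the wrapper above: it builds a default util_models.RuntimeOptions and delegates to upgrade_dbinstance_engine_version_with_options, so callers that need explicit timeouts or retry behavior can construct the options themselves. The sketch below assumes a client built as in the earlier example; the instance ID and timeout values are purely illustrative assumptions, not values from the source.

runtime = util_models.RuntimeOptions(
    read_timeout=60000,      # milliseconds; illustrative assumption
    connect_timeout=10000,   # milliseconds; illustrative assumption
)
request = dds_20151201_models.UpgradeDBInstanceEngineVersionRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # hypothetical instance ID
    engine_version='5.0',
)
response = client.upgrade_dbinstance_engine_version_with_options(request, runtime)
print(response.body)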
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def db_version(engine):\n return IMPL.db_version(engine)",
"def db_version():\n return IMPL.db_version()",
"def mmo_mongo_version(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"version\"]",
"def version(self):\r\n print migration.db_version()",
"def upgrade_to_1():\n config.db.singletons.insert_one({'_id': 'version', 'database': 1})",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")",
"def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db",
"def inspect(self):\n self.db.connect()\n result = None\n try:\n jambi_versions = JambiModel.select().limit(1)\n if any(jambi_versions):\n field = jambi_versions[0].ref\n try:\n result = int(field)\n except ValueError:\n self.logger.error('Database current version \"{}\" is not '\n 'valid'.format(jambi_versions[0].ref))\n self.logger.info('Your database is at version '\n '{}'.format(field))\n else:\n self.logger.info('This database hasn\\'t been migrated yet')\n except ProgrammingError:\n self.logger.info('Run \"init\" to create a jambi version table')\n finally:\n self.db.close()\n return result",
"def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def Mongodb_Connection():\r\n \r\n client = pymongo.MongoClient(\"localhost\", 27017)\r\n db = client.test\r\n\r\n\r\n if db.Transaction.estimated_document_count() != 0:\r\n \"\"\"\r\n To make a new test, the database is cleared if not empty\r\n \"\"\"\r\n \r\n db.command(\"dropDatabase\")\r\n \r\n return db",
"def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_to_18():\n\n gear_doc = config.db.singletons.find_one({\"_id\": \"gears\"})\n\n if gear_doc is not None:\n gear_list = gear_doc.get('gear_list', [])\n for gear in gear_list:\n try:\n gears.upsert_gear(gear)\n except Exception as e:\n logging.error(\"\")\n logging.error(\"Error upgrading gear:\")\n logging.error(type(e))\n logging.error(\"Gear will not be retained. Document follows:\")\n logging.error(gear)\n logging.error(\"\")\n\n config.db.singletons.remove({\"_id\": \"gears\"})",
"def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'",
"def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:Saran1!@ds113736.mlab.com:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db",
"def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None",
"def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object",
"def GetMigrationStatus(self, instance):\n raise HypervisorError(\"Migration not supported by the chroot hypervisor\")",
"def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)",
"def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def environment_needs_upgrade(self, db):\n\n return False",
"def engine_version(self) -> Optional[str]:\n return pulumi.get(self, \"engine_version\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def detect_version(conn):\n try:\n with conn.begin():\n db_version = conn.scalar(text(\n \"SELECT version FROM configuration\"))\n except exc.ProgrammingError:\n with conn.begin():\n packages_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'packages'\")))\n with conn.begin():\n statistics_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_views \"\n \"WHERE schemaname = 'public' AND viewname = 'statistics'\")))\n with conn.begin():\n files_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'files'\")))\n if not packages_exists:\n # Database is uninitialized\n return None\n elif not files_exists:\n # Database is too ancient to upgrade\n raise RuntimeError(\"Database version older than 0.4; cannot upgrade\")\n elif not statistics_exists:\n return \"0.4\"\n else:\n return \"0.5\"\n else:\n return db_version",
"def test_upgrade_with_auto_upgrade_latest_engine_enabled():",
"def engine_version(self) -> str:\n return pulumi.get(self, \"engine_version\")",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def upgrade_environment(self, db):\n\n pass",
"def connect_to_eeg_db():\n logger.info(\"Connecting to MongoDB ...\")\n con = pymongo.MongoClient()\n db = con.eeg_db\n eeg = db.eeg\n logger.info(\"Connected and db opened.\")\n return con, db, eeg",
"def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")",
"def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def environment_needs_upgrade(self, db):\n if db is not None:\n db.commit()\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n try:\n cursor.execute(\"SELECT * FROM default_image\")\n return False\n except:\n return True",
"async def _get_pymongo_instance(app: web.Application , url) -> None:\n try:\n log.info(f'Getting pymongo instance')\n mongo_instance = dict()\n _cli = MongoClient(url)\n mongo_instance['client'] = _cli\n mongo_instance['db'] = _cli['versiondb']\n app['mongo'] = mongo_instance\n await asyncio.sleep(1)\n\n except Exception as e:\n\n log.error(f'_get_pymongo_instance {e}')\n raise e",
"def test_using(self):\n\n class Number2(Document):\n n = IntField()\n\n Number2.drop_collection()\n with switch_db(Number2, \"test2\") as Number2:\n Number2.drop_collection()\n\n for i in range(1, 10):\n t = Number2(n=i)\n t.switch_db(\"test2\")\n t.save()\n\n assert len(Number2.objects.using(\"test2\")) == 9",
"def is_mongod_running(self):\r\n \r\n try:\r\n _connect_to_mongo_port(int(self.port))\r\n return True\r\n except OSError:\r\n return False\r\n except Exception:\r\n return False",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = rds.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_dbinstances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'available'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()",
"def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]",
"def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)",
"def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # 
http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now 
status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, 
tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]",
"def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False",
"def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def initdb(self):\n logger.info(\"Initializing database\")\n self.instances.drop()\n self.instances.create_index([('class_id', pymongo.HASHED)])\n # Creates a unique index\n self.instances.create_index(\n 'name',\n unique=True,\n partialFilterExpression={'deleted' : False}\n )\n start_time = time.time()\n timeout = 60 * 5\n while not self.axops_client.ping():\n if time.time() - start_time > timeout:\n raise AXTimeoutException(\"Timed out ({}s) waiting for axops availability\".format(timeout))\n time.sleep(3)\n\n for fix_doc in self.axdb_client.get_fixture_instances():\n instance = FixtureInstance.deserialize_axdbdoc(fix_doc)\n self.instances.insert_one(instance.mongodoc())\n\n logger.info(\"Database initialized\")",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")",
"def db_instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_instance_id\")",
"def connection():\n from mongoengine import connect\n\n connect(host=\"mongomock://localhost\", alias=\"default\")",
"def num_databases ():\n return len(_dbobjects)",
"def get_db(db_config):\n hosts=[]\n db_uri=''\n\n for host in db_config['hosts']:\n hosts.append( host['host'] + \":\" + str(host['port'] ))\n\n db_uri = \"mongodb://\" + \\\n ','.join(hosts) + \\\n \"/?authSource=\" + db_config['auth_source'] + \\\n \"&replicaSet=\" + db_config['replica_set']\n\n\n db = MongoClient(\n db_uri,\n username = db_config['username'],\n password = db_config['password'],\n authMechanism = db_config['auth_mechanism'],\n ssl = (True if db_config['use_ssl'] else False),\n ssl_certfile = (db_config['ssl_certificate_file'] if db_config['ssl_certificate_file'] else None),\n ssl_ca_certs = (db_config['ssl_ca_file'] if db_config['ssl_ca_file'] else None),\n ssl_cert_reqs = (ssl.CERT_OPTIONAL if db_config['use_ssl'] else None),\n maxPoolSize = 5,\n wtimeout = 2500\n )[db_config['db_name']]\n \n return db",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongodb_connect():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n db = client.tweetbase\n return db",
"def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db",
"def connectToMongo():\n mongodb_uri = os.environ.get(\"DATABASE_URI\", \"\") or \"mongodb://localhost:27017\" \n client = pymongo.MongoClient(mongodb_uri)\n return client.insights_db # Declare the DB",
"def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db",
"def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()",
"def get_dbserver(self):\n servers = self.get_dbservers()\n assert servers, \"starter: don't have instances!\"\n return servers[0]",
"def __init__(self):\n self.client = MongoClient('localhost', 27017)#27017\n self.db = self.client.greency_db\n self.collection = self.db.inventory",
"def upgrade_to_22():\n\n logging.info('Upgrade v22, phase 1 of 3, upgrading gears...')\n\n # Add timestamps to gears.\n for gear in config.db.gears.find({}):\n now = datetime.datetime.utcnow()\n\n gear['created'] = now\n gear['modified'] = now\n\n config.db.gears.update({'_id': gear['_id']}, gear)\n\n # Ensure there cannot possibly be two gears of the same name with the same timestamp.\n # Plus or minus monotonic time.\n # A very silly solution, but we only ever need to do this once, on a double-digit number of documents.\n # Not worth the effort to, eg, rewind time and do math.\n time.sleep(1)\n logging.info(' Updated gear ' + str(gear['_id']) + ' ...')\n sys.stdout.flush()\n\n\n logging.info('Upgrade v22, phase 2 of 3, upgrading jobs...')\n\n # Now that they're updated, fetch all gears and hold them in memory.\n # This prevents extra database queries during the job upgrade.\n\n all_gears = list(config.db.gears.find({}))\n gears_map = { }\n\n for gear in all_gears:\n gear_name = gear['gear']['name']\n\n gears_map[gear_name] = gear\n\n # A dummy gear for missing refs\n dummy_gear = {\n 'category' : 'converter',\n 'gear' : {\n 'inputs' : {\n 'do-not-use' : {\n 'base' : 'file'\n }\n },\n 'maintainer' : 'Noone <nobody@example.example>',\n 'description' : 'This gear or job was referenced before gear versioning. Version information is not available for this gear.',\n 'license' : 'BSD-2-Clause',\n 'author' : 'Noone',\n 'url' : 'https://example.example',\n 'label' : 'Deprecated Gear',\n 'flywheel' : '0',\n 'source' : 'https://example.example',\n 'version' : '0.0.0',\n 'custom' : {\n 'flywheel': {\n 'invalid': True\n }\n },\n 'config' : {},\n 'name' : 'deprecated-gear'\n },\n 'exchange' : {\n 'git-commit' : '0000000000000000000000000000000000000000',\n 'rootfs-hash' : 'sha384:000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n 'rootfs-url' : 'https://example.example/does-not-exist.tgz'\n }\n }\n\n maximum = config.db.jobs.count()\n upgraded = 0\n\n # Blanket-assume gears were the latest in the DB pre-gear versioning.\n for job in config.db.jobs.find({}):\n\n # Look up latest gear by name, lose job name key\n gear_name = job['name']\n gear = gears_map.get(gear_name)\n\n if gear is None:\n logging.info('Job doc ' + str(job['_id']) + ' could not find gear ' + gear_name + ', creating...')\n\n new_gear = copy.deepcopy(dummy_gear)\n new_gear['gear']['name'] = gear_name\n\n # Save new gear, store id in memory\n resp = config.db.gears.insert_one(new_gear)\n new_id = resp.inserted_id\n new_gear['_id'] = str(new_id)\n\n # Insert gear into memory map\n gears_map[gear_name] = new_gear\n\n logging.info('Created gear ' + gear_name + ' with id ' + str(new_id) + '. 
Future jobs with this gear name with not alert.')\n\n gear = new_gear\n\n if gear is None:\n raise Exception(\"We don't understand python scopes ;( ;(\")\n\n # Store gear ID\n job.pop('name', None)\n job['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.jobs.update({'_id': job['_id']}, job)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' jobs of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, phase 3 of 3, upgrading batch...')\n\n maximum = config.db.batch.count()\n upgraded = 0\n\n for batch in config.db.batch.find({}):\n\n # Look up latest gear by name, lose job name key\n gear = gears.get_gear_by_name(batch['gear'])\n batch.pop('gear', None)\n\n # Store gear ID\n batch['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.batch.update({'_id': batch['_id']}, batch)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' batch of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, complete.')",
"def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db",
"def db_instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"db_instance_id\")",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def engine_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_version\")",
"def version(verbose: bool) -> None:\n print(Fore.BLUE + '==' * 15)\n print(\n Fore.YELLOW + 'Raven ' + Fore.CYAN + '0.1-dev'\n )\n print(Fore.BLUE + '==' * 15)\n if verbose:\n print(f'[DB]: {db.engine}')\n print(Style.RESET_ALL)",
"def database_exist(database_name):\n with MongoDBConnection() as mongo:\n database_list = mongo.connection.list_database_names()\n\n exist_flag = True\n if database_name not in database_list:\n print(f'Database {database_name} not found.')\n exist_flag = False\n\n return exist_flag",
"def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db",
"def db_version():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/version', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)",
"def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))",
"def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")",
"def __update_version(self):\r\n\r\n db_version = self.__get_db_version_int()\r\n if db_version == SCHEMA_VERSION:\r\n return\r\n\r\n #\r\n # Define functions for upgrading between schema versions\r\n #\r\n def update_2xto30():\r\n \"\"\"Incremental update of database from Freeseer 2.x and older to 3.0\r\n\r\n SCHEMA_VERSION is 300\r\n \"\"\"\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n def update_30to31():\r\n \"\"\"Performs incremental update of database from 3.0 and older to 3.1.\"\"\"\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old')\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_310)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time, Time, Time\r\n FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n #\r\n # Perform the upgrade\r\n #\r\n updaters = [update_2xto30, update_30to31]\r\n for updater in updaters:\r\n updater()\r\n\r\n QtSql.QSqlQuery('PRAGMA user_version = %i' % SCHEMA_VERSION)\r\n log.info('Upgraded presentations database from version {} to {}'.format(db_version, SCHEMA_VERSION))",
"def get_db_info(self):\n db_info = {}\n db_info[\"Mongo Server Info\"] = self.db_client.server_info()\n return db_info",
"def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version",
"def upgrade_to_23():\n\n db_config = config.db.singletons.find_one({'_id': 'config'})\n if db_config:\n auth_config = db_config.get('auth', {})\n if auth_config.get('auth_type'):\n auth_type = auth_config.pop('auth_type')\n config.db.singletons.update_one({'_id': 'config'}, {'$set': {'auth': {auth_type: auth_config}}})",
"def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version",
"def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'",
"def upgrade_to_9():\n\n config.db.acquisitions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})\n config.db.sessions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})"
] |
[
"0.60887057",
"0.57711166",
"0.56431997",
"0.55289644",
"0.5520775",
"0.54857856",
"0.5430625",
"0.5372646",
"0.5360392",
"0.52892214",
"0.5286582",
"0.5278977",
"0.52780265",
"0.52661526",
"0.5259783",
"0.5211577",
"0.51565385",
"0.514894",
"0.5146001",
"0.5137969",
"0.5118388",
"0.5096248",
"0.5089838",
"0.5089794",
"0.5075383",
"0.50698084",
"0.5068324",
"0.50616986",
"0.504583",
"0.50418276",
"0.5007424",
"0.49824673",
"0.49795914",
"0.49715576",
"0.49700013",
"0.49494177",
"0.49456763",
"0.49426377",
"0.49322644",
"0.49283183",
"0.49154904",
"0.49143952",
"0.4905709",
"0.49052295",
"0.49037567",
"0.49018228",
"0.48875242",
"0.48735008",
"0.48676628",
"0.48582584",
"0.48488858",
"0.484391",
"0.48421982",
"0.48404807",
"0.48391417",
"0.483753",
"0.483753",
"0.483753",
"0.48347166",
"0.48345494",
"0.48327628",
"0.48327628",
"0.48327628",
"0.48327628",
"0.48327628",
"0.48291487",
"0.48224378",
"0.48143822",
"0.48119178",
"0.48097154",
"0.48012707",
"0.48006055",
"0.4800156",
"0.4792472",
"0.47856984",
"0.47824818",
"0.4773425",
"0.4765348",
"0.47639638",
"0.4763806",
"0.47618374",
"0.47577664",
"0.47518453",
"0.47514722",
"0.4750991",
"0.47337508",
"0.47333506",
"0.47284353",
"0.47270066",
"0.47110906",
"0.4709931",
"0.47097647",
"0.47029164",
"0.47015804",
"0.47008976",
"0.46883383",
"0.46857816",
"0.46663383",
"0.46648097",
"0.46640822"
] |
0.58272535
|
1
|
The instance must be in the running state when you call this operation. > The available database versions depend on the storage engine used by the instance. For more information, see [Upgrades of MongoDB major versions](~~398673~~). You can also call the [DescribeAvailableEngineVersion](~~141355~~) operation to query the available database versions. > You cannot downgrade the MongoDB version of an instance after you upgrade it. > The instance is automatically restarted two to three times during the upgrade process. Make sure that you upgrade the instance during off-peak hours.
|
async def upgrade_dbinstance_engine_version_async(
self,
request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,
) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:
runtime = util_models.RuntimeOptions()
return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)
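
# Usage sketch for the async variant (illustrative assumption, not part of the
# generated SDK): assumes the same already-initialized `client` as above; the
# instance ID and target version remain placeholders.
import asyncio

from alibabacloud_dds20151201 import models as dds_20151201_models

async def run_upgrade(client):
    request = dds_20151201_models.UpgradeDBInstanceEngineVersionRequest(
        dbinstance_id='dds-bp1xxxxxxxxxxxxx',  # placeholder instance ID
        engine_version='5.0',                  # placeholder target major version
    )
    response = await client.upgrade_dbinstance_engine_version_async(request)
    print(response.body)

# asyncio.run(run_upgrade(client))  # `client` is assumed to be constructed elsewhere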
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def db_version(engine):\n return IMPL.db_version(engine)",
"def db_version():\n return IMPL.db_version()",
"def mmo_mongo_version(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"version\"]",
"def version(self):\r\n print migration.db_version()",
"def upgrade_to_1():\n config.db.singletons.insert_one({'_id': 'version', 'database': 1})",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db",
"def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def db_connect():\n client = pymongo.MongoClient(get_project_settings().get(\"MONGO_URI\"))\n db = client.vehicles\n\n return db",
"def inspect(self):\n self.db.connect()\n result = None\n try:\n jambi_versions = JambiModel.select().limit(1)\n if any(jambi_versions):\n field = jambi_versions[0].ref\n try:\n result = int(field)\n except ValueError:\n self.logger.error('Database current version \"{}\" is not '\n 'valid'.format(jambi_versions[0].ref))\n self.logger.info('Your database is at version '\n '{}'.format(field))\n else:\n self.logger.info('This database hasn\\'t been migrated yet')\n except ProgrammingError:\n self.logger.info('Run \"init\" to create a jambi version table')\n finally:\n self.db.close()\n return result",
"def api_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_URI).get_database()",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def Mongodb_Connection():\r\n \r\n client = pymongo.MongoClient(\"localhost\", 27017)\r\n db = client.test\r\n\r\n\r\n if db.Transaction.estimated_document_count() != 0:\r\n \"\"\"\r\n To make a new test, the database is cleared if not empty\r\n \"\"\"\r\n \r\n db.command(\"dropDatabase\")\r\n \r\n return db",
"def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")",
"def upgrade_to_18():\n\n gear_doc = config.db.singletons.find_one({\"_id\": \"gears\"})\n\n if gear_doc is not None:\n gear_list = gear_doc.get('gear_list', [])\n for gear in gear_list:\n try:\n gears.upsert_gear(gear)\n except Exception as e:\n logging.error(\"\")\n logging.error(\"Error upgrading gear:\")\n logging.error(type(e))\n logging.error(\"Gear will not be retained. Document follows:\")\n logging.error(gear)\n logging.error(\"\")\n\n config.db.singletons.remove({\"_id\": \"gears\"})",
"def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'",
"def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:Saran1!@ds113736.mlab.com:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db",
"def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None",
"def get_db():\n client = MongoClient(\"mongodb://admin:therightfit@ds125555.\" +\n \"mlab.com:25555/the_right_fit\")\n db_object = client['the_right_fit']\n return db_object",
"def GetMigrationStatus(self, instance):\n raise HypervisorError(\"Migration not supported by the chroot hypervisor\")",
"def upgrade_to_8():\n\n colls = config.db.collection_names()\n to_be_removed = ['version', 'config', 'static']\n # If we are in a bad state (singletons exists but so do any of the colls in to be removed)\n # remove singletons to try again\n if 'singletons' in colls and set(to_be_removed).intersection(set(colls)):\n config.db.drop_collection('singletons')\n\n if 'singletons' not in config.db.collection_names():\n static = config.db.static.find({})\n if static.count() > 0:\n config.db.singletons.insert_many(static)\n config.db.singletons.insert(config.db.version.find({}))\n\n configs = config.db.config.find({'latest': True},{'latest':0})\n if configs.count() == 1:\n c = configs[0]\n c['_id'] = 'config'\n config.db.singletons.insert_one(c)\n\n for c in to_be_removed:\n if c in config.db.collection_names():\n config.db.drop_collection(c)",
"def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)",
"def environment_needs_upgrade(self, db):\n\n return False",
"def engine_version(self) -> Optional[str]:\n return pulumi.get(self, \"engine_version\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def detect_version(conn):\n try:\n with conn.begin():\n db_version = conn.scalar(text(\n \"SELECT version FROM configuration\"))\n except exc.ProgrammingError:\n with conn.begin():\n packages_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'packages'\")))\n with conn.begin():\n statistics_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_views \"\n \"WHERE schemaname = 'public' AND viewname = 'statistics'\")))\n with conn.begin():\n files_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'files'\")))\n if not packages_exists:\n # Database is uninitialized\n return None\n elif not files_exists:\n # Database is too ancient to upgrade\n raise RuntimeError(\"Database version older than 0.4; cannot upgrade\")\n elif not statistics_exists:\n return \"0.4\"\n else:\n return \"0.5\"\n else:\n return db_version",
"def test_upgrade_with_auto_upgrade_latest_engine_enabled():",
"def engine_version(self) -> str:\n return pulumi.get(self, \"engine_version\")",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def upgrade_environment(self, db):\n\n pass",
"def connect_to_eeg_db():\n logger.info(\"Connecting to MongoDB ...\")\n con = pymongo.MongoClient()\n db = con.eeg_db\n eeg = db.eeg\n logger.info(\"Connected and db opened.\")\n return con, db, eeg",
"def __init__(self, db_name='leaderboard'):\n key = os.getenv('ATLAS_KEY')\n self.valid = key is not None\n self.client = None\n self.database = None\n if self.valid:\n try:\n self.client = pymongo.MongoClient(key % db_name)\n self.database = self.client[db_name]\n except pymongo.errors.ConfigurationError:\n self.valid = False",
"def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")",
"def environment_needs_upgrade(self, db):\n if db is not None:\n db.commit()\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n try:\n cursor.execute(\"SELECT * FROM default_image\")\n return False\n except:\n return True",
"async def _get_pymongo_instance(app: web.Application , url) -> None:\n try:\n log.info(f'Getting pymongo instance')\n mongo_instance = dict()\n _cli = MongoClient(url)\n mongo_instance['client'] = _cli\n mongo_instance['db'] = _cli['versiondb']\n app['mongo'] = mongo_instance\n await asyncio.sleep(1)\n\n except Exception as e:\n\n log.error(f'_get_pymongo_instance {e}')\n raise e",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def test_using(self):\n\n class Number2(Document):\n n = IntField()\n\n Number2.drop_collection()\n with switch_db(Number2, \"test2\") as Number2:\n Number2.drop_collection()\n\n for i in range(1, 10):\n t = Number2(n=i)\n t.switch_db(\"test2\")\n t.save()\n\n assert len(Number2.objects.using(\"test2\")) == 9",
"def is_mongod_running(self):\r\n \r\n try:\r\n _connect_to_mongo_port(int(self.port))\r\n return True\r\n except OSError:\r\n return False\r\n except Exception:\r\n return False",
"def get_running_instance_count(body):\n\tbody = dict(body)\n\tconn = rds.connect_to_region(region_name='us-west-2',\n\t\t\t\t\t\t\t\taws_access_key_id=body[\"aws_access_key_id\"],\n\t\t\t\t\t\t\t\taws_secret_access_key=body[\"aws_secret_access_key\"])\n\tinstance_status = conn.get_all_dbinstances()\n\trunning_instances = 0\n\tfor item in instance_status:\n\t\tif (item.state_name == 'available'):\n\t\t\trunning_instances += 1\n\treturn {\"data\":running_instances}",
"def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()",
"def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]",
"def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)",
"def check(self, instance):\n\n def total_seconds(td):\n \"\"\"\n Returns total seconds of a timedelta in a way that's safe for\n Python < 2.7\n \"\"\"\n if hasattr(td, 'total_seconds'):\n return td.total_seconds()\n else:\n return (\n lag.microseconds +\n (lag.seconds + lag.days * 24 * 3600) * 10**6\n ) / 10.0**6\n\n if 'server' not in instance:\n raise Exception(\"Missing 'server' in mongo config\")\n\n # x.509 authentication\n ssl_params = {\n 'ssl': instance.get('ssl', None),\n 'ssl_keyfile': instance.get('ssl_keyfile', None),\n 'ssl_certfile': instance.get('ssl_certfile', None),\n 'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),\n 'ssl_ca_certs': instance.get('ssl_ca_certs', None)\n }\n\n for key, param in ssl_params.items():\n if param is None:\n del ssl_params[key]\n\n server = instance['server']\n username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(server, sanitize_username=bool(ssl_params))\n additional_metrics = instance.get('additional_metrics', [])\n\n # Get the list of metrics to collect\n collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics\n metrics_to_collect = self._get_metrics_to_collect(\n server,\n additional_metrics\n )\n\n # Tagging\n tags = instance.get('tags', [])\n # ...de-dupe tags to avoid a memory leak\n tags = list(set(tags))\n\n if not db_name:\n self.log.info('No MongoDB database found in URI. Defaulting to admin.')\n db_name = 'admin'\n\n service_check_tags = [\n \"db:%s\" % db_name\n ]\n service_check_tags.extend(tags)\n\n # ...add the `server` tag to the metrics' tags only\n # (it's added in the backend for service checks)\n tags.append('server:%s' % clean_server_name)\n\n if nodelist:\n host = nodelist[0][0]\n port = nodelist[0][1]\n service_check_tags = service_check_tags + [\n \"host:%s\" % host,\n \"port:%s\" % port\n ]\n\n timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000\n try:\n cli = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,\n **ssl_params)\n # some commands can only go against the admin DB\n admindb = cli['admin']\n db = cli[db_name]\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n\n # Authenticate\n do_auth = True\n use_x509 = ssl_params and not password\n\n if not username:\n self.log.debug(\n u\"A username is required to authenticate to `%s`\", server\n )\n do_auth = False\n\n if do_auth:\n if auth_source:\n self.log.info(\"authSource was specified in the the server URL: using '%s' as the authentication database\", auth_source)\n self._authenticate(cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags)\n else:\n self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)\n\n try:\n status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)\n except Exception:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.CRITICAL,\n tags=service_check_tags)\n raise\n else:\n self.service_check(\n self.SERVICE_CHECK_NAME,\n AgentCheck.OK,\n tags=service_check_tags)\n\n if status['ok'] == 0:\n raise Exception(status['errmsg'].__str__())\n\n ops = db.current_op()\n status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0\n\n status['stats'] = db.command('dbstats')\n dbstats = {}\n dbstats[db_name] = {'stats': status['stats']}\n\n # Handle replica data, if any\n # See\n # 
http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa\n try:\n data = {}\n dbnames = []\n\n replSet = admindb.command('replSetGetStatus')\n if replSet:\n primary = None\n current = None\n\n # need a new connection to deal with replica sets\n setname = replSet.get('set')\n cli_rs = pymongo.mongo_client.MongoClient(\n server,\n socketTimeoutMS=timeout,\n connectTimeoutMS=timeout,\n serverSelectionTimeoutMS=timeout,\n replicaset=setname,\n read_preference=pymongo.ReadPreference.NEAREST,\n **ssl_params)\n\n if do_auth:\n if auth_source:\n self._authenticate(cli_rs[auth_source], username, password, use_x509, server, service_check_tags)\n else:\n self._authenticate(cli_rs[db_name], username, password, use_x509, server, service_check_tags)\n\n # Replication set information\n replset_name = replSet['set']\n replset_state = self.get_state_name(replSet['myState']).lower()\n\n tags.extend([\n u\"replset_name:{0}\".format(replset_name),\n u\"replset_state:{0}\".format(replset_state),\n ])\n\n # Find nodes: master and current node (ourself)\n for member in replSet.get('members'):\n if member.get('self'):\n current = member\n if int(member.get('state')) == 1:\n primary = member\n\n # Compute a lag time\n if current is not None and primary is not None:\n if 'optimeDate' in primary and 'optimeDate' in current:\n lag = primary['optimeDate'] - current['optimeDate']\n data['replicationLag'] = total_seconds(lag)\n\n if current is not None:\n data['health'] = current['health']\n\n data['state'] = replSet['myState']\n\n if current is not None:\n total = 0.0\n cfg = cli_rs['local']['system.replset'].find_one()\n for member in cfg.get('members'):\n total += member.get('votes', 1)\n if member['_id'] == current['_id']:\n data['votes'] = member.get('votes', 1)\n data['voteFraction'] = data['votes'] / total\n\n status['replSet'] = data\n\n # Submit events\n self._report_replica_set_state(\n data['state'], clean_server_name, replset_name, self.agentConfig\n )\n\n except Exception as e:\n if \"OperationFailure\" in repr(e) and \"not running with --replSet\" in str(e):\n pass\n else:\n raise e\n\n # If these keys exist, remove them for now as they cannot be serialized\n try:\n status['backgroundFlushing'].pop('last_finished')\n except KeyError:\n pass\n try:\n status.pop('localTime')\n except KeyError:\n pass\n\n dbnames = cli.database_names()\n self.gauge('mongodb.dbs', len(dbnames), tags=tags)\n\n for db_n in dbnames:\n db_aux = cli[db_n]\n dbstats[db_n] = {'stats': db_aux.command('dbstats')}\n\n # Go through the metrics and save the values\n for metric_name in metrics_to_collect:\n # each metric is of the form: x.y.z with z optional\n # and can be found at status[x][y][z]\n value = status\n\n if metric_name.startswith('stats'):\n continue\n else:\n try:\n for c in metric_name.split(\".\"):\n value = value[c]\n except KeyError:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(value)))\n\n # Submit the metric\n submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, value, tags=tags)\n\n for st, value in dbstats.iteritems():\n for metric_name in metrics_to_collect:\n if not metric_name.startswith('stats.'):\n continue\n\n try:\n val = value['stats'][metric_name.split('.')[1]]\n except KeyError:\n continue\n\n # value is now 
status[x][y][z]\n if not isinstance(val, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(metric_name, type(val))\n )\n\n # Submit the metric\n metrics_tags = (\n tags +\n [\n u\"cluster:db:{0}\".format(st), # FIXME 6.0 - keep for backward compatibility\n u\"db:{0}\".format(st),\n ]\n )\n\n submit_method, metric_name_alias = \\\n self._resolve_metric(metric_name, metrics_to_collect)\n submit_method(self, metric_name_alias, val, tags=metrics_tags)\n\n if _is_affirmative(instance.get('collections_indexes_stats')):\n mongo_version = cli.server_info().get('version', '0.0')\n if LooseVersion(mongo_version) >= LooseVersion(\"3.2\"):\n self._collect_indexes_stats(instance, db, tags)\n else:\n self.log.error(\"'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s\", mongo_version)\n\n # Report the usage metrics for dbs/collections\n if 'top' in additional_metrics:\n try:\n dbtop = db.command('top')\n for ns, ns_metrics in dbtop['totals'].iteritems():\n if \".\" not in ns:\n continue\n\n # configure tags for db name and collection name\n dbname, collname = ns.split(\".\", 1)\n ns_tags = tags + [\"db:%s\" % dbname, \"collection:%s\" % collname]\n\n # iterate over DBTOP metrics\n for m in self.TOP_METRICS:\n # each metric is of the form: x.y.z with z optional\n # and can be found at ns_metrics[x][y][z]\n value = ns_metrics\n try:\n for c in m.split(\".\"):\n value = value[c]\n except Exception:\n continue\n\n # value is now status[x][y][z]\n if not isinstance(value, (int, long, float)):\n raise TypeError(\n u\"{0} value is a {1}, it should be an int, a float or a long instead.\"\n .format(m, type(value))\n )\n\n # Submit the metric\n submit_method, metric_name_alias = \\\n self._resolve_metric(m, metrics_to_collect, prefix=\"usage\")\n submit_method(self, metric_name_alias, value, tags=ns_tags)\n except Exception as e:\n self.log.warning('Failed to record `top` metrics %s' % str(e))\n\n\n if 'local' in dbnames: # it might not be if we are connectiing through mongos\n # Fetch information analogous to Mongo's db.getReplicationInfo()\n localdb = cli['local']\n\n oplog_data = {}\n\n for ol_collection_name in (\"oplog.rs\", \"oplog.$main\"):\n ol_options = localdb[ol_collection_name].options()\n if ol_options:\n break\n\n if ol_options:\n try:\n oplog_data['logSizeMB'] = round(\n ol_options['size'] / 2.0 ** 20, 2\n )\n\n oplog = localdb[ol_collection_name]\n\n oplog_data['usedSizeMB'] = round(\n localdb.command(\"collstats\", ol_collection_name)['size'] / 2.0 ** 20, 2\n )\n\n op_asc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.ASCENDING).limit(1)\n op_dsc_cursor = oplog.find({\"ts\": {\"$exists\": 1}}).sort(\"$natural\", pymongo.DESCENDING).limit(1)\n\n try:\n first_timestamp = op_asc_cursor[0]['ts'].as_datetime()\n last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()\n oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)\n except (IndexError, KeyError):\n # if the oplog collection doesn't have any entries\n # if an object in the collection doesn't have a ts value, we ignore it\n pass\n except KeyError:\n # encountered an error trying to access options.size for the oplog collection\n self.log.warning(u\"Failed to record `ReplicationInfo` metrics.\")\n\n for (m, value) in oplog_data.iteritems():\n submit_method, metric_name_alias = \\\n self._resolve_metric('oplog.%s' % m, metrics_to_collect)\n submit_method(self, metric_name_alias, value, 
tags=tags)\n\n else:\n self.log.debug('\"local\" database not in dbnames. Not collecting ReplicationInfo metrics')\n\n # get collection level stats\n try:\n # Ensure that you're on the right db\n db = cli[db_name]\n # grab the collections from the configutation\n coll_names = instance.get('collections', [])\n # loop through the collections\n for coll_name in coll_names:\n # grab the stats from the collection\n stats = db.command(\"collstats\", coll_name)\n # loop through the metrics\n for m in self.collection_metrics_names:\n coll_tags = tags + [\"db:%s\" % db_name, \"collection:%s\" % coll_name]\n value = stats.get(m, None)\n if not value:\n continue\n\n # if it's the index sizes, then it's a dict.\n if m == 'indexSizes':\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n # loop through the indexes\n for (idx, val) in value.iteritems():\n # we tag the index\n idx_tags = coll_tags + [\"index:%s\" % idx]\n submit_method(self, metric_name_alias, val, tags=idx_tags)\n else:\n submit_method, metric_name_alias = \\\n self._resolve_metric('collection.%s' % m, self.COLLECTION_METRICS)\n submit_method(self, metric_name_alias, value, tags=coll_tags)\n except Exception as e:\n self.log.warning(u\"Failed to record `collection` metrics.\")\n self.log.exception(e)",
"def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"engine_version\")",
"def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False",
"def get_db(request: Request) -> MongoWrapper:\n return request.app.state.db",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"engine_version\")",
"def initdb(self):\n logger.info(\"Initializing database\")\n self.instances.drop()\n self.instances.create_index([('class_id', pymongo.HASHED)])\n # Creates a unique index\n self.instances.create_index(\n 'name',\n unique=True,\n partialFilterExpression={'deleted' : False}\n )\n start_time = time.time()\n timeout = 60 * 5\n while not self.axops_client.ping():\n if time.time() - start_time > timeout:\n raise AXTimeoutException(\"Timed out ({}s) waiting for axops availability\".format(timeout))\n time.sleep(3)\n\n for fix_doc in self.axdb_client.get_fixture_instances():\n instance = FixtureInstance.deserialize_axdbdoc(fix_doc)\n self.instances.insert_one(instance.mongodoc())\n\n logger.info(\"Database initialized\")",
"def get_instance_OLD (self):\n instances = self.data['instances']\n if not len(instances) == 1:\n raise Exception, \"ArchivalObject: %d Instances found\" % len(instances)\n return instances[0]",
"def electrolytedb():\n if not check_for_mongodb():\n pytest.skip(\"MongoDB is required\")",
"def db_instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"db_instance_id\")",
"def connection():\n from mongoengine import connect\n\n connect(host=\"mongomock://localhost\", alias=\"default\")",
"def num_databases ():\n return len(_dbobjects)",
"def get_db(db_config):\n hosts=[]\n db_uri=''\n\n for host in db_config['hosts']:\n hosts.append( host['host'] + \":\" + str(host['port'] ))\n\n db_uri = \"mongodb://\" + \\\n ','.join(hosts) + \\\n \"/?authSource=\" + db_config['auth_source'] + \\\n \"&replicaSet=\" + db_config['replica_set']\n\n\n db = MongoClient(\n db_uri,\n username = db_config['username'],\n password = db_config['password'],\n authMechanism = db_config['auth_mechanism'],\n ssl = (True if db_config['use_ssl'] else False),\n ssl_certfile = (db_config['ssl_certificate_file'] if db_config['ssl_certificate_file'] else None),\n ssl_ca_certs = (db_config['ssl_ca_file'] if db_config['ssl_ca_file'] else None),\n ssl_cert_reqs = (ssl.CERT_OPTIONAL if db_config['use_ssl'] else None),\n maxPoolSize = 5,\n wtimeout = 2500\n )[db_config['db_name']]\n \n return db",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongodb_connect():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n db = client.tweetbase\n return db",
"def get_db():\n if not hasattr(g, 'mongo_db'):\n g.db = get_mongo_db()\n\n return g.db",
"def connectToMongo():\n mongodb_uri = os.environ.get(\"DATABASE_URI\", \"\") or \"mongodb://localhost:27017\" \n client = pymongo.MongoClient(mongodb_uri)\n return client.insights_db # Declare the DB",
"def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\n@cluster0.z3tac.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db",
"def get_dbserver(self):\n servers = self.get_dbservers()\n assert servers, \"starter: don't have instances!\"\n return servers[0]",
"def upgrade_db():\n import publicprize.db_upgrade\n\n backup_db()\n for field, date in (\n (\"submission_start\", \"6/16/2017 12:0:0\"),\n (\"submission_end\", \"9/7/2017 12:0:0\"),\n (\"public_voting_start\", \"9/8/2017 12:0:0\"),\n (\"public_voting_end\", \"9/15/2017 12:0:0\"),\n (\"judging_start\", \"9/27/2017 12:0:0\"),\n (\"judging_end\", \"9/27/2017 19:0:0\"),\n ):\n set_contest_date_time('esprit-venture-challenge', date, field)\n db.session.commit()",
"def __init__(self):\n self.client = MongoClient('localhost', 27017)#27017\n self.db = self.client.greency_db\n self.collection = self.db.inventory",
"def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db",
"def db_instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"db_instance_id\")",
"def upgrade_to_22():\n\n logging.info('Upgrade v22, phase 1 of 3, upgrading gears...')\n\n # Add timestamps to gears.\n for gear in config.db.gears.find({}):\n now = datetime.datetime.utcnow()\n\n gear['created'] = now\n gear['modified'] = now\n\n config.db.gears.update({'_id': gear['_id']}, gear)\n\n # Ensure there cannot possibly be two gears of the same name with the same timestamp.\n # Plus or minus monotonic time.\n # A very silly solution, but we only ever need to do this once, on a double-digit number of documents.\n # Not worth the effort to, eg, rewind time and do math.\n time.sleep(1)\n logging.info(' Updated gear ' + str(gear['_id']) + ' ...')\n sys.stdout.flush()\n\n\n logging.info('Upgrade v22, phase 2 of 3, upgrading jobs...')\n\n # Now that they're updated, fetch all gears and hold them in memory.\n # This prevents extra database queries during the job upgrade.\n\n all_gears = list(config.db.gears.find({}))\n gears_map = { }\n\n for gear in all_gears:\n gear_name = gear['gear']['name']\n\n gears_map[gear_name] = gear\n\n # A dummy gear for missing refs\n dummy_gear = {\n 'category' : 'converter',\n 'gear' : {\n 'inputs' : {\n 'do-not-use' : {\n 'base' : 'file'\n }\n },\n 'maintainer' : 'Noone <nobody@example.example>',\n 'description' : 'This gear or job was referenced before gear versioning. Version information is not available for this gear.',\n 'license' : 'BSD-2-Clause',\n 'author' : 'Noone',\n 'url' : 'https://example.example',\n 'label' : 'Deprecated Gear',\n 'flywheel' : '0',\n 'source' : 'https://example.example',\n 'version' : '0.0.0',\n 'custom' : {\n 'flywheel': {\n 'invalid': True\n }\n },\n 'config' : {},\n 'name' : 'deprecated-gear'\n },\n 'exchange' : {\n 'git-commit' : '0000000000000000000000000000000000000000',\n 'rootfs-hash' : 'sha384:000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n 'rootfs-url' : 'https://example.example/does-not-exist.tgz'\n }\n }\n\n maximum = config.db.jobs.count()\n upgraded = 0\n\n # Blanket-assume gears were the latest in the DB pre-gear versioning.\n for job in config.db.jobs.find({}):\n\n # Look up latest gear by name, lose job name key\n gear_name = job['name']\n gear = gears_map.get(gear_name)\n\n if gear is None:\n logging.info('Job doc ' + str(job['_id']) + ' could not find gear ' + gear_name + ', creating...')\n\n new_gear = copy.deepcopy(dummy_gear)\n new_gear['gear']['name'] = gear_name\n\n # Save new gear, store id in memory\n resp = config.db.gears.insert_one(new_gear)\n new_id = resp.inserted_id\n new_gear['_id'] = str(new_id)\n\n # Insert gear into memory map\n gears_map[gear_name] = new_gear\n\n logging.info('Created gear ' + gear_name + ' with id ' + str(new_id) + '. 
Future jobs with this gear name with not alert.')\n\n gear = new_gear\n\n if gear is None:\n raise Exception(\"We don't understand python scopes ;( ;(\")\n\n # Store gear ID\n job.pop('name', None)\n job['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.jobs.update({'_id': job['_id']}, job)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' jobs of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, phase 3 of 3, upgrading batch...')\n\n maximum = config.db.batch.count()\n upgraded = 0\n\n for batch in config.db.batch.find({}):\n\n # Look up latest gear by name, lose job name key\n gear = gears.get_gear_by_name(batch['gear'])\n batch.pop('gear', None)\n\n # Store gear ID\n batch['gear_id'] = str(gear['_id'])\n\n # Save\n config.db.batch.update({'_id': batch['_id']}, batch)\n\n upgraded += 1\n if upgraded % 1000 == 0:\n logging.info(' Processed ' + str(upgraded) + ' batch of ' + str(maximum) + '...')\n\n\n logging.info('Upgrade v22, complete.')",
"def engine_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"engine_version\")",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def version(verbose: bool) -> None:\n print(Fore.BLUE + '==' * 15)\n print(\n Fore.YELLOW + 'Raven ' + Fore.CYAN + '0.1-dev'\n )\n print(Fore.BLUE + '==' * 15)\n if verbose:\n print(f'[DB]: {db.engine}')\n print(Style.RESET_ALL)",
"def database_exist(database_name):\n with MongoDBConnection() as mongo:\n database_list = mongo.connection.list_database_names()\n\n exist_flag = True\n if database_name not in database_list:\n print(f'Database {database_name} not found.')\n exist_flag = False\n\n return exist_flag",
"def get_db():\n\n if not hasattr(g, 'mongo_db'):\n client = MongoClient(C.MONGODB_DATABASE_URI)\n g.db = client.test\n return g.db",
"def db_version():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/version', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)",
"def ping(context):\n\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(SSH_CONFIG_PATH)\n documentdb_config = aws_infrastructure.tasks.library.documentdb.DocumentDBConfig.load(DOCUMENTDB_CONFIG_PATH)\n\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(\n ssh_config=ssh_config,\n ) as ssh_client:\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n remote_host=documentdb_config.endpoint,\n remote_port=documentdb_config.port,\n ) as ssh_port_forward:\n client = MongoClient(\n host=[\n 'localhost'\n ],\n port=ssh_port_forward.local_port,\n connect=True,\n username=documentdb_config.admin_user,\n password=documentdb_config.admin_password,\n tls=True,\n tlsInsecure=True,\n )\n\n print(client.admin.command('ping'))",
"def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")",
"def get_db_info(self):\n db_info = {}\n db_info[\"Mongo Server Info\"] = self.db_client.server_info()\n return db_info",
"def __update_version(self):\r\n\r\n db_version = self.__get_db_version_int()\r\n if db_version == SCHEMA_VERSION:\r\n return\r\n\r\n #\r\n # Define functions for upgrading between schema versions\r\n #\r\n def update_2xto30():\r\n \"\"\"Incremental update of database from Freeseer 2.x and older to 3.0\r\n\r\n SCHEMA_VERSION is 300\r\n \"\"\"\r\n if db_version > 300:\r\n log.debug('Database newer than schema version 300.')\r\n return # No update needed\r\n\r\n log.debug('Updating to schema 300.')\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old') # temporary table\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_300)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n def update_30to31():\r\n \"\"\"Performs incremental update of database from 3.0 and older to 3.1.\"\"\"\r\n QtSql.QSqlQuery('ALTER TABLE presentations RENAME TO presentations_old')\r\n self.__create_presentations_table(PRESENTATIONS_SCHEMA_310)\r\n QtSql.QSqlQuery(\"\"\"INSERT INTO presentations\r\n SELECT Id, Title, Speaker, Description, Level, Event, Room, Time, Time, Time\r\n FROM presentations_old\"\"\")\r\n QtSql.QSqlQuery('DROP TABLE presentations_old')\r\n\r\n #\r\n # Perform the upgrade\r\n #\r\n updaters = [update_2xto30, update_30to31]\r\n for updater in updaters:\r\n updater()\r\n\r\n QtSql.QSqlQuery('PRAGMA user_version = %i' % SCHEMA_VERSION)\r\n log.info('Upgraded presentations database from version {} to {}'.format(db_version, SCHEMA_VERSION))",
"def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version",
"def upgrade_to_23():\n\n db_config = config.db.singletons.find_one({'_id': 'config'})\n if db_config:\n auth_config = db_config.get('auth', {})\n if auth_config.get('auth_type'):\n auth_type = auth_config.pop('auth_type')\n config.db.singletons.update_one({'_id': 'config'}, {'$set': {'auth': {auth_type: auth_config}}})",
"def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version",
"def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'",
"def upgrade_to_9():\n\n config.db.acquisitions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})\n config.db.sessions.update_many({'timestamp':''}, {'$unset': {'timestamp': ''}})"
] |
[
"0.60901463",
"0.5825471",
"0.57710415",
"0.5643417",
"0.5529335",
"0.5520822",
"0.5485604",
"0.5430104",
"0.5372222",
"0.53590053",
"0.52879465",
"0.52787435",
"0.5277866",
"0.5267304",
"0.526129",
"0.5212593",
"0.51575416",
"0.51503915",
"0.5145446",
"0.5138863",
"0.51193225",
"0.5095775",
"0.50917137",
"0.5091186",
"0.5077215",
"0.50708914",
"0.50676465",
"0.50611746",
"0.50474524",
"0.50424445",
"0.5008201",
"0.49810055",
"0.49801278",
"0.49709794",
"0.49704796",
"0.49489212",
"0.49473375",
"0.49437666",
"0.49320483",
"0.49289787",
"0.49170792",
"0.49163818",
"0.49064016",
"0.49052927",
"0.4904626",
"0.49016684",
"0.4890246",
"0.48744887",
"0.4868371",
"0.48598304",
"0.48506624",
"0.48447642",
"0.484119",
"0.48410434",
"0.48410183",
"0.48381466",
"0.48381466",
"0.48381466",
"0.48365673",
"0.4835243",
"0.48336393",
"0.48336393",
"0.48336393",
"0.48336393",
"0.48336393",
"0.48305595",
"0.48226365",
"0.4815805",
"0.48126972",
"0.4810529",
"0.48020783",
"0.48016974",
"0.47986895",
"0.47939876",
"0.47867605",
"0.47830674",
"0.47742215",
"0.47667944",
"0.476496",
"0.47635505",
"0.47630793",
"0.47584856",
"0.4753126",
"0.4751997",
"0.4750706",
"0.47339877",
"0.47335467",
"0.4729051",
"0.4728544",
"0.47120193",
"0.47107536",
"0.4709495",
"0.47039026",
"0.47021094",
"0.47011456",
"0.46890125",
"0.46850505",
"0.46667677",
"0.46651056",
"0.46633062"
] |
0.5288145
|
10
|
When you call the UpgradeDBInstanceKernelVersion operation, the instance must be in the Running state. > The UpgradeDBInstanceKernelVersion operation is applicable to replica set and sharded cluster instances, but not to standalone instances. > The instance is restarted once during the upgrade. Make sure that you call this operation during off-peak hours.
|
def upgrade_dbinstance_kernel_version_with_options(
self,
request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UpgradeDBInstanceKernelVersion',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),
self.call_api(params, req, runtime)
)
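
A minimal synchronous sketch of calling the method above, assuming the same alibabacloud_dds20151201 SDK layout; the credentials, endpoint, and instance ID are placeholders rather than values from this document.

from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_dds20151201.client import Client
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util import models as util_models


def upgrade_kernel_version(instance_id: str) -> None:
    # Assumption: credentials and the regional endpoint are supplied by the caller.
    config = open_api_models.Config(
        access_key_id='<your-access-key-id>',
        access_key_secret='<your-access-key-secret>',
        endpoint='mongodb.aliyuncs.com',
    )
    client = Client(config)
    # Only the instance ID is required; the instance must be a replica set or
    # sharded cluster instance in the Running state.
    request = dds_20151201_models.UpgradeDBInstanceKernelVersionRequest(
        dbinstance_id=instance_id,
    )
    runtime = util_models.RuntimeOptions()
    response = client.upgrade_dbinstance_kernel_version_with_options(request, runtime)
    print(response.body)


if __name__ == '__main__':
    upgrade_kernel_version('dds-bp1xxxxxxxxxxxxx')  # hypothetical instance ID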
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def upgrade_dbinstance_kernel_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_kernel_version_with_options(request, runtime)",
"async def upgrade_dbinstance_kernel_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)",
"async def upgrade_dbinstance_kernel_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)",
"def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)",
"def update_rds_db_instance(RdsDbInstanceArn=None, DbUser=None, DbPassword=None):\n pass",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart_kernel(self, kernel_id, now=False):",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n pass",
"def update_instance(InstanceId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, Architecture=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None):\n pass",
"def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version",
"def restart_dbinstance_with_options(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n self.call_api(params, req, runtime)\n )",
"def shutdown_kernel(self, kernel_id, now=False, restart=False):",
"async def restart_dbinstance_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.restart_dbinstance_with_options_async(request, runtime)",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def reboot_instance(InstanceId=None):\n pass",
"def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def switch_dbinstance_ha(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return self.switch_dbinstance_hawith_options(request, runtime)",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def upgrade_kernel_all(reboot='yes'):\n execute('pre_check')\n execute('create_install_repo')\n nodes = []\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n elif version == '14.04':\n (package, os_type) = ('linux-image-3.13.0-40-generic', 'ubuntu')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def switch_dbinstance_hawith_options(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n self.call_api(params, req, runtime)\n )",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"def restart_kernel(self, now=False, **kw):",
"def upgrade_kernel_without_openstack(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n non_openstack_nodes = [node for node in env.roledefs['all'] if node not in env.roledefs['openstack']]\n execute('create_installer_repo')\n execute('create_install_repo_without_openstack', *tgzs)\n nodes = []\n with settings(host_string=env.roledefs['cfgm'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n\n if ('red hat' in dist.lower() or 'centos linux' in dist.lower()) and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_package_installed_info(package, os_type, *non_openstack_nodes)\n if not nodes['not_installed']:\n print \"Nodes are already booted with expected version\"\n return\n if nodes['installed']:\n print \"Nodes (%s) are already booted in expected \"\\\n \"kernel version\" % \", \".join(nodes['installed'])\n\n execute(upgrade_kernel_node, *nodes['not_installed'], **kwargs)\n if reboot == 'yes':\n if env.host_string in nodes:\n nodes.remove(env.host_string).append(env.host_string)\n reboot_nodes(*nodes['not_installed'])\n else:\n print \"WARNING: Reboot Skipped as reboot=False; \"\\\n \"Reboot manually to avoid misconfiguration\"",
"async def restart_dbinstance_with_options_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade(self, old_version, new_version):\n pass",
"def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])",
"def shutdown_kernel(self, now=False, restart=False):",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def shut_down_instance_if_swap_used():\n stats = statistics.report_statistics()\n swap_usage_total = stats.get('swap_usage_total', None)\n ram_usage_total = stats.get('ram_usage_total', None)\n\n logger.info('Checking swap and RAM usage...')\n\n if swap_usage_total and ram_usage_total:\n try:\n swap_usage_total = float(ram_usage_total)\n ram_usage_total = float(ram_usage_total)\n except:\n logger.error('Swap and RAM usage check failed during float() conversion')\n return\n\n if ram_usage_total > 70:\n if swap_usage_total > 10:\n # we're swapping very badly!\n logger.error('Swap and RAM usage is too high! Terminating instance')\n try:\n conn = boto.connect_ec2()\n instance_id = get_instance_metadata()['instance-id']\n conn.terminate_instances(instance_id, decrement_capacity=True)\n except Exception as e:\n logger.error('Failed to terminate instance, exception: %s' % str(e))",
"def downgrade_app_db(app, user, migration_version):\n ctx.logger.info(\n 'Downgrading %s DB to revision: %s', app.capitalize(),\n migration_version\n )\n run('db-migrate-down-to', app, user, migration_version)",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def update_instances_os ( ec2_conn, vpc, base_name, restart = False ) :\n instances = get_all_vpc_instances( ec2_conn, vpc )\n status = ssh_call_vpc( ec2_conn, base_name, instances, \"sudo yum -y update\", True )\n if restart and status == 0 :\n for instance in instances :\n instance.reboot( )",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def environment_needs_upgrade(self, db):\n\n return False",
"def deregister_rds_db_instance(RdsDbInstanceArn=None):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def upgrade_environment(self, db):\n\n pass",
"def upgrade_kernel_node(*args, **kwargs):\n for host_string in args:\n with settings(host_string=host_string):\n execute('create_install_repo_node', host_string)\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n print \"upgrading apparmor before upgrading kernel\"\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif version == '14.04':\n if 'version' in kwargs:\n kernel_ver = kwargs.get('version')\n else:\n kernel_ver = \"3.13.0-106\"\n print \"Installing \"+kernel_ver+\" kernel headers\"\n apt_install([\"linux-headers-\"+kernel_ver,\n \"linux-headers-\"+kernel_ver+\"-generic\"])\n print \"Upgrading the kernel to \"+kernel_ver\n apt_install([\"linux-image-\"+kernel_ver+\"-generic\",\n \"linux-image-extra-\"+kernel_ver+\"-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'red hat' in dist.lower() and version.startswith('7'):\n print \"Upgrading RHEL kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n print \"Upgrading Centos kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)'\n execute('set_grub_default_node', host_string, value=default_grub)",
"def shutdown_kernel(self, now=False, restart=False):\n pass",
"async def switch_dbinstance_ha_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return await self.switch_dbinstance_hawith_options_async(request, runtime)",
"def nfvi_cold_migrate_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('cold_migrate_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def delete_instance(db_instance):\n rds = boto3.client('rds')\n rds.delete_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n SkipFinalSnapshot=True\n )",
"def upgrade(self, instance_id, cpus=None, memory=None,\r\n nic_speed=None, public=True):\r\n package_items = self._get_package_items()\r\n item_id = []\r\n if cpus:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'cpus', cpus, public)})\r\n if memory:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'memory', memory)})\r\n if nic_speed:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'nic_speed', nic_speed)})\r\n\r\n order = {}\r\n order['complexType'] = \\\r\n 'SoftLayer_Container_Product_Order_Virtual_Guest_Upgrade'\r\n order['virtualGuests'] = [{'id': int(instance_id)}]\r\n order['prices'] = item_id\r\n order['properties'] = [{'name': 'MAINTENANCE_WINDOW',\r\n 'value': str(datetime.datetime.now())}]\r\n if cpus or memory or nic_speed:\r\n self.client['Product_Order'].verifyOrder(order)\r\n self.client['Product_Order'].placeOrder(order)\r\n return True\r\n return False",
"def nfvi_resize_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('resize_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def upgrade_to_version(self, version, mixed_version=False, nodes=None):\n debug('Upgrading to ' + version)\n if not mixed_version:\n nodes = self.cluster.nodelist()\n\n for node in nodes:\n debug('Prepping node for shutdown: ' + node.name)\n node.flush()\n self._check_values()\n self._check_counter_values()\n \n for node in nodes:\n debug('Shutting down node: ' + node.name)\n time.sleep(.5)\n node.stop(wait_other_notice=False)\n\n if ENABLE_VNODES and version >= \"1.2\":\n self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 256})\n\n for node in nodes:\n debug('Upgrading node: ' + node.name)\n node.set_cassandra_dir(cassandra_version=version)\n node.start(wait_other_notice=True)\n time.sleep(.5)\n if not mixed_version:\n node.nodetool('upgradesstables')\n\n if ENABLE_VNODES and version >= \"1.2\" and not mixed_version:\n debug(\"Running shuffle\")\n self.node2.shuffle(\"create\")\n self.node2.shuffle(\"en\")\n\n for node in nodes:\n debug('Checking node: ' + node.name)\n if not mixed_version:\n self._write_values()\n self._check_values()\n\n self._increment_counter_value()\n time.sleep(0.5)\n self._check_counter_values()\n \n if not mixed_version:\n # Check we can bootstrap a new node on the upgraded cluster:\n debug(\"Adding a node to the cluster\")\n self.cluster.set_cassandra_dir(cassandra_version=version)\n nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)))\n nnode.start(no_wait=False)\n nnode.watch_log_for(\"Bootstrap completed!\")\n debug(\"node should be up, but sleeping a bit to ensure...\")\n time.sleep(15)\n self._check_values()\n self._check_counter_values()\n \n if mixed_version:\n debug('Successfully upgraded part of the cluster to %s' % version) \n else:\n debug('Successfully upgraded to %s' % version)",
"def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data",
"def request_shutdown(self, kernel_id, restart=False):",
"def upgrade_device(device, runtime):\n command = 'upgrade \"%s\" \"%s\"' % (device.udid, runtime.identifier)\n _run_command(command)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def upgradedb(self, args):\n upgrade_db(args.dbfile)",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def eurologic_kernel_version(self):\n return self._eurologic_kernel_version",
"def do_upgrade(env, ver, cursor):\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def RebootMachine(instance_id):\n # Terminate the EC2 instance.\n ec2 = ec2_manager.EC2Manager()\n\n logging.info('Rebooting machine with instance id \"%s\".', instance_id)\n ec2.RebootInstances([instance_id])\n\n # Update the corresponding client machine model.\n client_machine.SetMachineStatus(instance_id, enum.MACHINE_STATUS.RUNNING)\n client_machine.IncrementRetryCount(instance_id)",
"async def switch_dbinstance_hawith_options_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')",
"def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def _upgradeDB():\n\n # Set current DB name\n currentDbName = basedefs.DB_NAME\n\n # Before db upgrade we want to make a backup of existing db in case we fail\n # The backup is performed on local system, even for remote DB.\n dbBackupFile = tempfile.mkstemp(suffix=\".sql\", dir=basedefs.DIR_DB_BACKUPS)[1]\n logging.debug(\"backing up %s db to file %s\"%(basedefs.DB_NAME, dbBackupFile))\n\n # Run db backup\n utils.backupDB(basedefs.DB_NAME, getDbUser(), dbBackupFile, getDbHostName(), getDbPort())\n\n # Rename DB first. If it fails - stop with \"active connections\" error.\n # if upgrade passes fine, rename the DB back.\n DB_NAME_TEMP = \"%s_%s\" % (basedefs.DB_NAME, utils.getCurrentDateTime())\n utils.renameDB(basedefs.DB_NAME, DB_NAME_TEMP)\n currentDbName = DB_NAME_TEMP\n\n # if we're here, DB was renamed.\n # upgrade script must run from dbscripts dir\n currentDir = os.getcwd()\n os.chdir(basedefs.DIR_DB_SCRIPTS)\n\n try:\n\n logging.debug(\"upgrading db schema\")\n cmd = [\n os.path.join(basedefs.DIR_DB_SCRIPTS, basedefs.FILE_DB_UPGRADE_SCRIPT),\n \"-u\", getDbUser(),\n \"-d\", DB_NAME_TEMP,\n \"-s\", getDbHostName(),\n \"-p\", getDbPort(),\n ]\n\n # Run upgrade.sh script to update existing db\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_UPGRADE_FAILED)\n\n # Log the successful upgrade\n logging.debug('Successfully upgraded %s DB'%(basedefs.DB_NAME))\n controller.MESSAGES.append(\"DB was upgraded to latest version. previous DB backup can be found at %s\"%(dbBackupFile))\n\n # Go back to previous dir\n os.chdir(currentDir)\n\n # Upgrade was successful, so rename the DB back.\n utils.renameDB(DB_NAME_TEMP, basedefs.DB_NAME)\n currentDbName = basedefs.DB_NAME\n\n # Update rpm version in vdc options\n utils.updateVDCOption(\"ProductRPMVersion\", utils.getRpmVersion(basedefs.ENGINE_RPM_NAME))\n except:\n # Upgrade failed! we need to restore the old db\n logging.debug(\"DB upgrade failed, restoring it to a previous state. DB was backed up to %s\", dbBackupFile)\n\n # Delete the original DB.\n # TODO: handle the case of failure - it should not stop the flow, but should write to the log\n sqlQuery=\"DROP DATABASE %s\" % currentDbName\n utils.execRemoteSqlCommand(getDbUser(), \\\n getDbHostName(), \\\n getDbPort(), \\\n basedefs.DB_POSTGRES, \\\n sqlQuery, False, \\\n output_messages.ERR_DB_DROP)\n\n # Restore the DB\n utils.restoreDB(getDbUser(), getDbHostName(), getDbPort(), dbBackupFile)\n\n raise Exception(output_messages.ERR_DB_UPGRADE_FAILED)",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def modify_instance_attribute(self, instance_id, attribute, value):\r\n # Allow a bool to be passed in for value of disableApiTermination\r\n if attribute == 'disableApiTermination':\r\n if isinstance(value, bool):\r\n if value:\r\n value = 'true'\r\n else:\r\n value = 'false'\r\n params = {'InstanceId' : instance_id,\r\n 'Attribute' : attribute,\r\n 'Value' : value}\r\n return self.get_status('ModifyInstanceAttribute', params, verb='POST')",
"def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))",
"def reboot():\n if not required():\n return \"Kernel reboot not required\"\n cmd_str = 'shutdown -r +1 \"Server is going down for kernel upgrade\"'\n Popen([cmd_str], shell=True, stdin=None,\n stdout=None, stderr=None, close_fds=True)\n return cmd_str",
"def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. Try again after the upgrade is completed.\"))",
"def reboot_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_status('RebootInstances', params)",
"def terminate_volumes(db, context, instance_id):\n volume_api = volume.API()\n for bdm in db.block_device_mapping_get_all_by_instance(context,\n instance_id):\n #LOG.debug(_(\"terminating bdm %s\") % bdm)\n if bdm['volume_id'] and bdm['delete_on_termination']:\n volume_api.delete(context, bdm['volume_id'])\n db.block_device_mapping_destroy(context, bdm['id'])",
"def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")",
"def terminate_instance_in_asg(instance_id):\n if not app_config['DRY_RUN']:\n logger.info('Terminating ec2 instance in ASG {}...'.format(instance_id))\n try:\n response = client.terminate_instance_in_auto_scaling_group(\n InstanceId=instance_id,\n ShouldDecrementDesiredCapacity=True\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.ok:\n logger.info('Termination signal for instance is successfully sent.')\n else:\n logger.info('Termination signal for instance has failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n raise Exception('Termination of instance failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n\n except client.exceptions.ClientError as e:\n if 'DryRunOperation' not in str(e):\n raise",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def update_broker(AutoMinorVersionUpgrade=None, BrokerId=None, Configuration=None, EngineVersion=None, Logs=None):\n pass",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def downgrade(self, revision):\n alembic.command.downgrade(self.alembic_config(), revision)",
"def set_attribute(\n attribute,\n attribute_value,\n instance_name=None,\n instance_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n attribute_list = [\n \"instanceType\",\n \"kernel\",\n \"ramdisk\",\n \"userData\",\n \"disableApiTermination\",\n \"instanceInitiatedShutdownBehavior\",\n \"rootDeviceName\",\n \"blockDeviceMapping\",\n \"productCodes\",\n \"sourceDestCheck\",\n \"groupSet\",\n \"ebsOptimized\",\n \"sriovNetSupport\",\n ]\n if not any((instance_name, instance_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: instance_name or\"\n \" instance_id.\"\n )\n if instance_name and instance_id:\n raise SaltInvocationError(\n \"Both instance_name and instance_id can not be specified in the same\"\n \" command.\"\n )\n if attribute not in attribute_list:\n raise SaltInvocationError(\n \"Attribute must be one of: {}.\".format(attribute_list)\n )\n try:\n if instance_name:\n instances = find_instances(\n name=instance_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n filters=filters,\n )\n if len(instances) != 1:\n raise CommandExecutionError(\n \"Found more than one EC2 instance matching the criteria.\"\n )\n instance_id = instances[0]\n attribute = conn.modify_instance_attribute(\n instance_id, attribute, attribute_value\n )\n if not attribute:\n return False\n return attribute\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return False",
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def test_rebuild_with_instance_in_stopped_state(self):\n # Initialize the VM to stopped state\n db.instance_update(self.context, self.inst.uuid,\n {\"vm_state\": vm_states.STOPPED})\n self.inst.vm_state = vm_states.STOPPED\n\n self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',\n lambda *a, **ka: True)\n\n self._rebuild(vm_states_is_stopped=True)\n\n # Check the vm state is reset to stopped\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['vm_state'], vm_states.STOPPED)",
"def modify_dbinstance_monitor(\n self,\n request: dds_20151201_models.ModifyDBInstanceMonitorRequest,\n ) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_monitor_with_options(request, runtime)",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def downgrade(self, version):\n return NotImplemented",
"def svr_kernel(name, kernel, epsilon=None, **kwargs):\n def _name(msg):\n return '%s.%s_%s' % (name, kernel, msg)\n\n hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)\n hp_space.update(_svr_hp_space(_name, epsilon))\n return scope.sklearn_SVR(**hp_space)",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def get_booted_kernel():\n try:\n return run(['/usr/bin/uname', '-r'])['stdout'].strip()\n except CalledProcessError as e:\n raise StopActorExecutionError(\n message='Unable to obtain release of the booted kernel.',\n details={'details': str(e), 'stderr': e.stderr}\n )",
"def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue"
] |
[
"0.79366666",
"0.713365",
"0.65763366",
"0.6048078",
"0.56803346",
"0.5636641",
"0.55429274",
"0.5415511",
"0.53714454",
"0.5316516",
"0.5281482",
"0.52180755",
"0.5102098",
"0.50040174",
"0.499213",
"0.49542388",
"0.49341667",
"0.48674172",
"0.4845689",
"0.4839142",
"0.48272142",
"0.4824538",
"0.48026252",
"0.477112",
"0.47707742",
"0.47630265",
"0.47553083",
"0.47503594",
"0.4742487",
"0.47246185",
"0.4720964",
"0.47063074",
"0.46855798",
"0.46849433",
"0.46758637",
"0.4671623",
"0.4661101",
"0.46582153",
"0.46518558",
"0.46464157",
"0.4629341",
"0.46221766",
"0.45972386",
"0.45969447",
"0.45960993",
"0.45649782",
"0.4551607",
"0.45503637",
"0.45361462",
"0.45238042",
"0.4509444",
"0.44977415",
"0.44950128",
"0.44823164",
"0.44705537",
"0.44632745",
"0.44631416",
"0.44348088",
"0.4422883",
"0.44182605",
"0.44134474",
"0.44128",
"0.44034097",
"0.43959242",
"0.43942264",
"0.43893892",
"0.43877074",
"0.4382195",
"0.4380125",
"0.43737435",
"0.43709952",
"0.43638647",
"0.43624297",
"0.4356412",
"0.43313396",
"0.4331092",
"0.43278086",
"0.43174165",
"0.43158725",
"0.43016684",
"0.42739943",
"0.42721885",
"0.42708334",
"0.4266886",
"0.42453477",
"0.4235935",
"0.42344356",
"0.42287815",
"0.42231098",
"0.4220793",
"0.42100167",
"0.42027402",
"0.419156",
"0.4169955",
"0.41693267",
"0.4158953",
"0.41468254",
"0.41467878",
"0.4138803",
"0.4137213"
] |
0.7097949
|
2
|
When you call the UpgradeDBInstanceKernelVersion operation, the instance must be in the Running state. > The UpgradeDBInstanceKernelVersion operation is applicable to replica set and sharded cluster instances, but not to standalone instances. > The instance is restarted once during the upgrade. We recommend that you call this operation during off-peak hours.
|
async def upgrade_dbinstance_kernel_version_with_options_async(
self,
request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,
runtime: util_models.RuntimeOptions,
) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.dbinstance_id):
query['DBInstanceId'] = request.dbinstance_id
if not UtilClient.is_unset(request.owner_account):
query['OwnerAccount'] = request.owner_account
if not UtilClient.is_unset(request.owner_id):
query['OwnerId'] = request.owner_id
if not UtilClient.is_unset(request.resource_owner_account):
query['ResourceOwnerAccount'] = request.resource_owner_account
if not UtilClient.is_unset(request.resource_owner_id):
query['ResourceOwnerId'] = request.resource_owner_id
if not UtilClient.is_unset(request.security_token):
query['SecurityToken'] = request.security_token
req = open_api_models.OpenApiRequest(
query=OpenApiUtilClient.query(query)
)
params = open_api_models.Params(
action='UpgradeDBInstanceKernelVersion',
version='2015-12-01',
protocol='HTTPS',
pathname='/',
method='POST',
auth_type='AK',
style='RPC',
req_body_type='formData',
body_type='json'
)
return TeaCore.from_map(
dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),
await self.call_api_async(params, req, runtime)
)
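Below is a minimal usage sketch for the async method above. It is illustrative only and not part of the original record: it assumes the same module aliases used elsewhere in this file (dds_20151201_models, util_models), a client that has already been initialized, and a hypothetical instance ID.
async def upgrade_kernel_version_example(client):
    # Hypothetical instance ID; the instance must be in the Running state,
    # and it is restarted once during the kernel version upgrade.
    request = dds_20151201_models.UpgradeDBInstanceKernelVersionRequest(
        dbinstance_id='dds-bpxxxxxxxxxxxx'
    )
    runtime = util_models.RuntimeOptions()
    # Delegates to the async wrapper defined above and returns its response model.
    return await client.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)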
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def upgrade_dbinstance_kernel_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_kernel_version_with_options(request, runtime)",
"async def upgrade_dbinstance_kernel_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)",
"def upgrade_dbinstance_kernel_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)",
"def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)",
"def update_rds_db_instance(RdsDbInstanceArn=None, DbUser=None, DbPassword=None):\n pass",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart_kernel(self, kernel_id, now=False):",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n pass",
"def update_instance(InstanceId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, Architecture=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None):\n pass",
"def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version",
"def restart_dbinstance_with_options(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n self.call_api(params, req, runtime)\n )",
"def shutdown_kernel(self, kernel_id, now=False, restart=False):",
"async def restart_dbinstance_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.restart_dbinstance_with_options_async(request, runtime)",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def reboot_instance(InstanceId=None):\n pass",
"def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def switch_dbinstance_ha(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return self.switch_dbinstance_hawith_options(request, runtime)",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def upgrade_kernel_all(reboot='yes'):\n execute('pre_check')\n execute('create_install_repo')\n nodes = []\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n elif version == '14.04':\n (package, os_type) = ('linux-image-3.13.0-40-generic', 'ubuntu')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def switch_dbinstance_hawith_options(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n self.call_api(params, req, runtime)\n )",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"def restart_kernel(self, now=False, **kw):",
"def upgrade_kernel_without_openstack(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n non_openstack_nodes = [node for node in env.roledefs['all'] if node not in env.roledefs['openstack']]\n execute('create_installer_repo')\n execute('create_install_repo_without_openstack', *tgzs)\n nodes = []\n with settings(host_string=env.roledefs['cfgm'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n\n if ('red hat' in dist.lower() or 'centos linux' in dist.lower()) and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_package_installed_info(package, os_type, *non_openstack_nodes)\n if not nodes['not_installed']:\n print \"Nodes are already booted with expected version\"\n return\n if nodes['installed']:\n print \"Nodes (%s) are already booted in expected \"\\\n \"kernel version\" % \", \".join(nodes['installed'])\n\n execute(upgrade_kernel_node, *nodes['not_installed'], **kwargs)\n if reboot == 'yes':\n if env.host_string in nodes:\n nodes.remove(env.host_string).append(env.host_string)\n reboot_nodes(*nodes['not_installed'])\n else:\n print \"WARNING: Reboot Skipped as reboot=False; \"\\\n \"Reboot manually to avoid misconfiguration\"",
"async def restart_dbinstance_with_options_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade(self, old_version, new_version):\n pass",
"def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])",
"def shutdown_kernel(self, now=False, restart=False):",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def shut_down_instance_if_swap_used():\n stats = statistics.report_statistics()\n swap_usage_total = stats.get('swap_usage_total', None)\n ram_usage_total = stats.get('ram_usage_total', None)\n\n logger.info('Checking swap and RAM usage...')\n\n if swap_usage_total and ram_usage_total:\n try:\n swap_usage_total = float(ram_usage_total)\n ram_usage_total = float(ram_usage_total)\n except:\n logger.error('Swap and RAM usage check failed during float() conversion')\n return\n\n if ram_usage_total > 70:\n if swap_usage_total > 10:\n # we're swapping very badly!\n logger.error('Swap and RAM usage is too high! Terminating instance')\n try:\n conn = boto.connect_ec2()\n instance_id = get_instance_metadata()['instance-id']\n conn.terminate_instances(instance_id, decrement_capacity=True)\n except Exception as e:\n logger.error('Failed to terminate instance, exception: %s' % str(e))",
"def downgrade_app_db(app, user, migration_version):\n ctx.logger.info(\n 'Downgrading %s DB to revision: %s', app.capitalize(),\n migration_version\n )\n run('db-migrate-down-to', app, user, migration_version)",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def update_instances_os ( ec2_conn, vpc, base_name, restart = False ) :\n instances = get_all_vpc_instances( ec2_conn, vpc )\n status = ssh_call_vpc( ec2_conn, base_name, instances, \"sudo yum -y update\", True )\n if restart and status == 0 :\n for instance in instances :\n instance.reboot( )",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def environment_needs_upgrade(self, db):\n\n return False",
"def deregister_rds_db_instance(RdsDbInstanceArn=None):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def upgrade_environment(self, db):\n\n pass",
"def upgrade_kernel_node(*args, **kwargs):\n for host_string in args:\n with settings(host_string=host_string):\n execute('create_install_repo_node', host_string)\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n print \"upgrading apparmor before upgrading kernel\"\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif version == '14.04':\n if 'version' in kwargs:\n kernel_ver = kwargs.get('version')\n else:\n kernel_ver = \"3.13.0-106\"\n print \"Installing \"+kernel_ver+\" kernel headers\"\n apt_install([\"linux-headers-\"+kernel_ver,\n \"linux-headers-\"+kernel_ver+\"-generic\"])\n print \"Upgrading the kernel to \"+kernel_ver\n apt_install([\"linux-image-\"+kernel_ver+\"-generic\",\n \"linux-image-extra-\"+kernel_ver+\"-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'red hat' in dist.lower() and version.startswith('7'):\n print \"Upgrading RHEL kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n print \"Upgrading Centos kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)'\n execute('set_grub_default_node', host_string, value=default_grub)",
"def shutdown_kernel(self, now=False, restart=False):\n pass",
"async def switch_dbinstance_ha_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return await self.switch_dbinstance_hawith_options_async(request, runtime)",
"def nfvi_cold_migrate_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('cold_migrate_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def delete_instance(db_instance):\n rds = boto3.client('rds')\n rds.delete_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n SkipFinalSnapshot=True\n )",
"def upgrade(self, instance_id, cpus=None, memory=None,\r\n nic_speed=None, public=True):\r\n package_items = self._get_package_items()\r\n item_id = []\r\n if cpus:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'cpus', cpus, public)})\r\n if memory:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'memory', memory)})\r\n if nic_speed:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'nic_speed', nic_speed)})\r\n\r\n order = {}\r\n order['complexType'] = \\\r\n 'SoftLayer_Container_Product_Order_Virtual_Guest_Upgrade'\r\n order['virtualGuests'] = [{'id': int(instance_id)}]\r\n order['prices'] = item_id\r\n order['properties'] = [{'name': 'MAINTENANCE_WINDOW',\r\n 'value': str(datetime.datetime.now())}]\r\n if cpus or memory or nic_speed:\r\n self.client['Product_Order'].verifyOrder(order)\r\n self.client['Product_Order'].placeOrder(order)\r\n return True\r\n return False",
"def nfvi_resize_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('resize_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def upgrade_to_version(self, version, mixed_version=False, nodes=None):\n debug('Upgrading to ' + version)\n if not mixed_version:\n nodes = self.cluster.nodelist()\n\n for node in nodes:\n debug('Prepping node for shutdown: ' + node.name)\n node.flush()\n self._check_values()\n self._check_counter_values()\n \n for node in nodes:\n debug('Shutting down node: ' + node.name)\n time.sleep(.5)\n node.stop(wait_other_notice=False)\n\n if ENABLE_VNODES and version >= \"1.2\":\n self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 256})\n\n for node in nodes:\n debug('Upgrading node: ' + node.name)\n node.set_cassandra_dir(cassandra_version=version)\n node.start(wait_other_notice=True)\n time.sleep(.5)\n if not mixed_version:\n node.nodetool('upgradesstables')\n\n if ENABLE_VNODES and version >= \"1.2\" and not mixed_version:\n debug(\"Running shuffle\")\n self.node2.shuffle(\"create\")\n self.node2.shuffle(\"en\")\n\n for node in nodes:\n debug('Checking node: ' + node.name)\n if not mixed_version:\n self._write_values()\n self._check_values()\n\n self._increment_counter_value()\n time.sleep(0.5)\n self._check_counter_values()\n \n if not mixed_version:\n # Check we can bootstrap a new node on the upgraded cluster:\n debug(\"Adding a node to the cluster\")\n self.cluster.set_cassandra_dir(cassandra_version=version)\n nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)))\n nnode.start(no_wait=False)\n nnode.watch_log_for(\"Bootstrap completed!\")\n debug(\"node should be up, but sleeping a bit to ensure...\")\n time.sleep(15)\n self._check_values()\n self._check_counter_values()\n \n if mixed_version:\n debug('Successfully upgraded part of the cluster to %s' % version) \n else:\n debug('Successfully upgraded to %s' % version)",
"def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data",
"def request_shutdown(self, kernel_id, restart=False):",
"def upgrade_device(device, runtime):\n command = 'upgrade \"%s\" \"%s\"' % (device.udid, runtime.identifier)\n _run_command(command)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def upgradedb(self, args):\n upgrade_db(args.dbfile)",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def eurologic_kernel_version(self):\n return self._eurologic_kernel_version",
"def do_upgrade(env, ver, cursor):\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def RebootMachine(instance_id):\n # Terminate the EC2 instance.\n ec2 = ec2_manager.EC2Manager()\n\n logging.info('Rebooting machine with instance id \"%s\".', instance_id)\n ec2.RebootInstances([instance_id])\n\n # Update the corresponding client machine model.\n client_machine.SetMachineStatus(instance_id, enum.MACHINE_STATUS.RUNNING)\n client_machine.IncrementRetryCount(instance_id)",
"async def switch_dbinstance_hawith_options_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')",
"def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def _upgradeDB():\n\n # Set current DB name\n currentDbName = basedefs.DB_NAME\n\n # Before db upgrade we want to make a backup of existing db in case we fail\n # The backup is performed on local system, even for remote DB.\n dbBackupFile = tempfile.mkstemp(suffix=\".sql\", dir=basedefs.DIR_DB_BACKUPS)[1]\n logging.debug(\"backing up %s db to file %s\"%(basedefs.DB_NAME, dbBackupFile))\n\n # Run db backup\n utils.backupDB(basedefs.DB_NAME, getDbUser(), dbBackupFile, getDbHostName(), getDbPort())\n\n # Rename DB first. If it fails - stop with \"active connections\" error.\n # if upgrade passes fine, rename the DB back.\n DB_NAME_TEMP = \"%s_%s\" % (basedefs.DB_NAME, utils.getCurrentDateTime())\n utils.renameDB(basedefs.DB_NAME, DB_NAME_TEMP)\n currentDbName = DB_NAME_TEMP\n\n # if we're here, DB was renamed.\n # upgrade script must run from dbscripts dir\n currentDir = os.getcwd()\n os.chdir(basedefs.DIR_DB_SCRIPTS)\n\n try:\n\n logging.debug(\"upgrading db schema\")\n cmd = [\n os.path.join(basedefs.DIR_DB_SCRIPTS, basedefs.FILE_DB_UPGRADE_SCRIPT),\n \"-u\", getDbUser(),\n \"-d\", DB_NAME_TEMP,\n \"-s\", getDbHostName(),\n \"-p\", getDbPort(),\n ]\n\n # Run upgrade.sh script to update existing db\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_UPGRADE_FAILED)\n\n # Log the successful upgrade\n logging.debug('Successfully upgraded %s DB'%(basedefs.DB_NAME))\n controller.MESSAGES.append(\"DB was upgraded to latest version. previous DB backup can be found at %s\"%(dbBackupFile))\n\n # Go back to previous dir\n os.chdir(currentDir)\n\n # Upgrade was successful, so rename the DB back.\n utils.renameDB(DB_NAME_TEMP, basedefs.DB_NAME)\n currentDbName = basedefs.DB_NAME\n\n # Update rpm version in vdc options\n utils.updateVDCOption(\"ProductRPMVersion\", utils.getRpmVersion(basedefs.ENGINE_RPM_NAME))\n except:\n # Upgrade failed! we need to restore the old db\n logging.debug(\"DB upgrade failed, restoring it to a previous state. DB was backed up to %s\", dbBackupFile)\n\n # Delete the original DB.\n # TODO: handle the case of failure - it should not stop the flow, but should write to the log\n sqlQuery=\"DROP DATABASE %s\" % currentDbName\n utils.execRemoteSqlCommand(getDbUser(), \\\n getDbHostName(), \\\n getDbPort(), \\\n basedefs.DB_POSTGRES, \\\n sqlQuery, False, \\\n output_messages.ERR_DB_DROP)\n\n # Restore the DB\n utils.restoreDB(getDbUser(), getDbHostName(), getDbPort(), dbBackupFile)\n\n raise Exception(output_messages.ERR_DB_UPGRADE_FAILED)",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def modify_instance_attribute(self, instance_id, attribute, value):\r\n # Allow a bool to be passed in for value of disableApiTermination\r\n if attribute == 'disableApiTermination':\r\n if isinstance(value, bool):\r\n if value:\r\n value = 'true'\r\n else:\r\n value = 'false'\r\n params = {'InstanceId' : instance_id,\r\n 'Attribute' : attribute,\r\n 'Value' : value}\r\n return self.get_status('ModifyInstanceAttribute', params, verb='POST')",
"def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))",
"def reboot():\n if not required():\n return \"Kernel reboot not required\"\n cmd_str = 'shutdown -r +1 \"Server is going down for kernel upgrade\"'\n Popen([cmd_str], shell=True, stdin=None,\n stdout=None, stderr=None, close_fds=True)\n return cmd_str",
"def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. Try again after the upgrade is completed.\"))",
"def reboot_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_status('RebootInstances', params)",
"def terminate_volumes(db, context, instance_id):\n volume_api = volume.API()\n for bdm in db.block_device_mapping_get_all_by_instance(context,\n instance_id):\n #LOG.debug(_(\"terminating bdm %s\") % bdm)\n if bdm['volume_id'] and bdm['delete_on_termination']:\n volume_api.delete(context, bdm['volume_id'])\n db.block_device_mapping_destroy(context, bdm['id'])",
"def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")",
"def terminate_instance_in_asg(instance_id):\n if not app_config['DRY_RUN']:\n logger.info('Terminating ec2 instance in ASG {}...'.format(instance_id))\n try:\n response = client.terminate_instance_in_auto_scaling_group(\n InstanceId=instance_id,\n ShouldDecrementDesiredCapacity=True\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.ok:\n logger.info('Termination signal for instance is successfully sent.')\n else:\n logger.info('Termination signal for instance has failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n raise Exception('Termination of instance failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n\n except client.exceptions.ClientError as e:\n if 'DryRunOperation' not in str(e):\n raise",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def update_broker(AutoMinorVersionUpgrade=None, BrokerId=None, Configuration=None, EngineVersion=None, Logs=None):\n pass",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def downgrade(self, revision):\n alembic.command.downgrade(self.alembic_config(), revision)",
"def set_attribute(\n attribute,\n attribute_value,\n instance_name=None,\n instance_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n attribute_list = [\n \"instanceType\",\n \"kernel\",\n \"ramdisk\",\n \"userData\",\n \"disableApiTermination\",\n \"instanceInitiatedShutdownBehavior\",\n \"rootDeviceName\",\n \"blockDeviceMapping\",\n \"productCodes\",\n \"sourceDestCheck\",\n \"groupSet\",\n \"ebsOptimized\",\n \"sriovNetSupport\",\n ]\n if not any((instance_name, instance_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: instance_name or\"\n \" instance_id.\"\n )\n if instance_name and instance_id:\n raise SaltInvocationError(\n \"Both instance_name and instance_id can not be specified in the same\"\n \" command.\"\n )\n if attribute not in attribute_list:\n raise SaltInvocationError(\n \"Attribute must be one of: {}.\".format(attribute_list)\n )\n try:\n if instance_name:\n instances = find_instances(\n name=instance_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n filters=filters,\n )\n if len(instances) != 1:\n raise CommandExecutionError(\n \"Found more than one EC2 instance matching the criteria.\"\n )\n instance_id = instances[0]\n attribute = conn.modify_instance_attribute(\n instance_id, attribute, attribute_value\n )\n if not attribute:\n return False\n return attribute\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return False",
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def test_rebuild_with_instance_in_stopped_state(self):\n # Initialize the VM to stopped state\n db.instance_update(self.context, self.inst.uuid,\n {\"vm_state\": vm_states.STOPPED})\n self.inst.vm_state = vm_states.STOPPED\n\n self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',\n lambda *a, **ka: True)\n\n self._rebuild(vm_states_is_stopped=True)\n\n # Check the vm state is reset to stopped\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['vm_state'], vm_states.STOPPED)",
"def modify_dbinstance_monitor(\n self,\n request: dds_20151201_models.ModifyDBInstanceMonitorRequest,\n ) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_monitor_with_options(request, runtime)",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def downgrade(self, version):\n return NotImplemented",
"def svr_kernel(name, kernel, epsilon=None, **kwargs):\n def _name(msg):\n return '%s.%s_%s' % (name, kernel, msg)\n\n hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)\n hp_space.update(_svr_hp_space(_name, epsilon))\n return scope.sklearn_SVR(**hp_space)",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def get_booted_kernel():\n try:\n return run(['/usr/bin/uname', '-r'])['stdout'].strip()\n except CalledProcessError as e:\n raise StopActorExecutionError(\n message='Unable to obtain release of the booted kernel.',\n details={'details': str(e), 'stderr': e.stderr}\n )",
"def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue"
] |
[
"0.7937624",
"0.71343994",
"0.709872",
"0.6047597",
"0.5681405",
"0.5637572",
"0.5542199",
"0.5414881",
"0.5371163",
"0.53170407",
"0.5281084",
"0.52176833",
"0.51020956",
"0.5003764",
"0.49939057",
"0.4953727",
"0.49351478",
"0.48668408",
"0.48451775",
"0.48387653",
"0.48285994",
"0.48237258",
"0.48027873",
"0.47704744",
"0.47701794",
"0.47630525",
"0.47566912",
"0.4751616",
"0.47425237",
"0.47247413",
"0.47207424",
"0.4706276",
"0.46868134",
"0.4684226",
"0.46761033",
"0.46724525",
"0.46617356",
"0.46565306",
"0.46503112",
"0.4645479",
"0.46289146",
"0.4622591",
"0.45978042",
"0.45957693",
"0.45947057",
"0.4563984",
"0.4551956",
"0.45489565",
"0.45349884",
"0.45234072",
"0.45104504",
"0.44982845",
"0.4495037",
"0.44809455",
"0.44700724",
"0.44636273",
"0.44625857",
"0.443491",
"0.4422159",
"0.44189516",
"0.44135535",
"0.44127917",
"0.44031233",
"0.4395045",
"0.4394592",
"0.43914622",
"0.43885326",
"0.43803445",
"0.43803072",
"0.43735373",
"0.4370827",
"0.43633425",
"0.43612805",
"0.43567395",
"0.43317157",
"0.4330571",
"0.43272725",
"0.43168676",
"0.43149632",
"0.43012202",
"0.42748645",
"0.42707816",
"0.4270063",
"0.4267225",
"0.42447582",
"0.42346933",
"0.42340645",
"0.422871",
"0.42229092",
"0.42207265",
"0.4210093",
"0.42024067",
"0.41893435",
"0.4169557",
"0.41690236",
"0.41590607",
"0.41487694",
"0.41471097",
"0.41398677",
"0.41364798"
] |
0.6576928
|
3
|
When you call the UpgradeDBInstanceKernelVersion operation, the instance must be in the Running state. > The UpgradeDBInstanceKernelVersion operation is applicable to replica set and sharded cluster instances, but not to standalone instances. > The instance is restarted once during the upgrade. We recommend that you call this operation during off-peak hours.
|
def upgrade_dbinstance_kernel_version(
self,
request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,
) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:
runtime = util_models.RuntimeOptions()
return self.upgrade_dbinstance_kernel_version_with_options(request, runtime)
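
Below is a minimal, hypothetical usage sketch for the wrapper above. It assumes the generated alibabacloud_dds20151201 SDK package and the standard OpenAPI config object; the endpoint, credentials, and instance ID are placeholders, not values taken from this dataset.

from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

# Build the client (credential handling simplified for illustration).
config = open_api_models.Config(
    access_key_id='<your-access-key-id>',
    access_key_secret='<your-access-key-secret>',
    endpoint='mongodb.aliyuncs.com',  # assumed regional endpoint
)
client = DdsClient(config)

# The instance must be in the Running state; it is restarted once during the upgrade.
request = dds_20151201_models.UpgradeDBInstanceKernelVersionRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxx',  # placeholder instance ID
)
response = client.upgrade_dbinstance_kernel_version(request)
print(response.body)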
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def upgrade_dbinstance_kernel_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)",
"def upgrade_dbinstance_kernel_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"async def upgrade_dbinstance_kernel_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)",
"def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)",
"def update_rds_db_instance(RdsDbInstanceArn=None, DbUser=None, DbPassword=None):\n pass",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart_kernel(self, kernel_id, now=False):",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n pass",
"def update_instance(InstanceId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, Architecture=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None):\n pass",
"def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version",
"def restart_dbinstance_with_options(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n self.call_api(params, req, runtime)\n )",
"def shutdown_kernel(self, kernel_id, now=False, restart=False):",
"async def restart_dbinstance_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.restart_dbinstance_with_options_async(request, runtime)",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def reboot_instance(InstanceId=None):\n pass",
"def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def switch_dbinstance_ha(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return self.switch_dbinstance_hawith_options(request, runtime)",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def upgrade_kernel_all(reboot='yes'):\n execute('pre_check')\n execute('create_install_repo')\n nodes = []\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n elif version == '14.04':\n (package, os_type) = ('linux-image-3.13.0-40-generic', 'ubuntu')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def switch_dbinstance_hawith_options(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n self.call_api(params, req, runtime)\n )",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"def restart_kernel(self, now=False, **kw):",
"def upgrade_kernel_without_openstack(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n non_openstack_nodes = [node for node in env.roledefs['all'] if node not in env.roledefs['openstack']]\n execute('create_installer_repo')\n execute('create_install_repo_without_openstack', *tgzs)\n nodes = []\n with settings(host_string=env.roledefs['cfgm'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n\n if ('red hat' in dist.lower() or 'centos linux' in dist.lower()) and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_package_installed_info(package, os_type, *non_openstack_nodes)\n if not nodes['not_installed']:\n print \"Nodes are already booted with expected version\"\n return\n if nodes['installed']:\n print \"Nodes (%s) are already booted in expected \"\\\n \"kernel version\" % \", \".join(nodes['installed'])\n\n execute(upgrade_kernel_node, *nodes['not_installed'], **kwargs)\n if reboot == 'yes':\n if env.host_string in nodes:\n nodes.remove(env.host_string).append(env.host_string)\n reboot_nodes(*nodes['not_installed'])\n else:\n print \"WARNING: Reboot Skipped as reboot=False; \"\\\n \"Reboot manually to avoid misconfiguration\"",
"async def restart_dbinstance_with_options_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade(self, old_version, new_version):\n pass",
"def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])",
"def shutdown_kernel(self, now=False, restart=False):",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def shut_down_instance_if_swap_used():\n stats = statistics.report_statistics()\n swap_usage_total = stats.get('swap_usage_total', None)\n ram_usage_total = stats.get('ram_usage_total', None)\n\n logger.info('Checking swap and RAM usage...')\n\n if swap_usage_total and ram_usage_total:\n try:\n swap_usage_total = float(ram_usage_total)\n ram_usage_total = float(ram_usage_total)\n except:\n logger.error('Swap and RAM usage check failed during float() conversion')\n return\n\n if ram_usage_total > 70:\n if swap_usage_total > 10:\n # we're swapping very badly!\n logger.error('Swap and RAM usage is too high! Terminating instance')\n try:\n conn = boto.connect_ec2()\n instance_id = get_instance_metadata()['instance-id']\n conn.terminate_instances(instance_id, decrement_capacity=True)\n except Exception as e:\n logger.error('Failed to terminate instance, exception: %s' % str(e))",
"def downgrade_app_db(app, user, migration_version):\n ctx.logger.info(\n 'Downgrading %s DB to revision: %s', app.capitalize(),\n migration_version\n )\n run('db-migrate-down-to', app, user, migration_version)",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def update_instances_os ( ec2_conn, vpc, base_name, restart = False ) :\n instances = get_all_vpc_instances( ec2_conn, vpc )\n status = ssh_call_vpc( ec2_conn, base_name, instances, \"sudo yum -y update\", True )\n if restart and status == 0 :\n for instance in instances :\n instance.reboot( )",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def environment_needs_upgrade(self, db):\n\n return False",
"def deregister_rds_db_instance(RdsDbInstanceArn=None):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def upgrade_environment(self, db):\n\n pass",
"def upgrade_kernel_node(*args, **kwargs):\n for host_string in args:\n with settings(host_string=host_string):\n execute('create_install_repo_node', host_string)\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n print \"upgrading apparmor before upgrading kernel\"\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif version == '14.04':\n if 'version' in kwargs:\n kernel_ver = kwargs.get('version')\n else:\n kernel_ver = \"3.13.0-106\"\n print \"Installing \"+kernel_ver+\" kernel headers\"\n apt_install([\"linux-headers-\"+kernel_ver,\n \"linux-headers-\"+kernel_ver+\"-generic\"])\n print \"Upgrading the kernel to \"+kernel_ver\n apt_install([\"linux-image-\"+kernel_ver+\"-generic\",\n \"linux-image-extra-\"+kernel_ver+\"-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'red hat' in dist.lower() and version.startswith('7'):\n print \"Upgrading RHEL kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n print \"Upgrading Centos kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)'\n execute('set_grub_default_node', host_string, value=default_grub)",
"def shutdown_kernel(self, now=False, restart=False):\n pass",
"async def switch_dbinstance_ha_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return await self.switch_dbinstance_hawith_options_async(request, runtime)",
"def nfvi_cold_migrate_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('cold_migrate_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def delete_instance(db_instance):\n rds = boto3.client('rds')\n rds.delete_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n SkipFinalSnapshot=True\n )",
"def upgrade(self, instance_id, cpus=None, memory=None,\r\n nic_speed=None, public=True):\r\n package_items = self._get_package_items()\r\n item_id = []\r\n if cpus:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'cpus', cpus, public)})\r\n if memory:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'memory', memory)})\r\n if nic_speed:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'nic_speed', nic_speed)})\r\n\r\n order = {}\r\n order['complexType'] = \\\r\n 'SoftLayer_Container_Product_Order_Virtual_Guest_Upgrade'\r\n order['virtualGuests'] = [{'id': int(instance_id)}]\r\n order['prices'] = item_id\r\n order['properties'] = [{'name': 'MAINTENANCE_WINDOW',\r\n 'value': str(datetime.datetime.now())}]\r\n if cpus or memory or nic_speed:\r\n self.client['Product_Order'].verifyOrder(order)\r\n self.client['Product_Order'].placeOrder(order)\r\n return True\r\n return False",
"def nfvi_resize_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('resize_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def upgrade_to_version(self, version, mixed_version=False, nodes=None):\n debug('Upgrading to ' + version)\n if not mixed_version:\n nodes = self.cluster.nodelist()\n\n for node in nodes:\n debug('Prepping node for shutdown: ' + node.name)\n node.flush()\n self._check_values()\n self._check_counter_values()\n \n for node in nodes:\n debug('Shutting down node: ' + node.name)\n time.sleep(.5)\n node.stop(wait_other_notice=False)\n\n if ENABLE_VNODES and version >= \"1.2\":\n self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 256})\n\n for node in nodes:\n debug('Upgrading node: ' + node.name)\n node.set_cassandra_dir(cassandra_version=version)\n node.start(wait_other_notice=True)\n time.sleep(.5)\n if not mixed_version:\n node.nodetool('upgradesstables')\n\n if ENABLE_VNODES and version >= \"1.2\" and not mixed_version:\n debug(\"Running shuffle\")\n self.node2.shuffle(\"create\")\n self.node2.shuffle(\"en\")\n\n for node in nodes:\n debug('Checking node: ' + node.name)\n if not mixed_version:\n self._write_values()\n self._check_values()\n\n self._increment_counter_value()\n time.sleep(0.5)\n self._check_counter_values()\n \n if not mixed_version:\n # Check we can bootstrap a new node on the upgraded cluster:\n debug(\"Adding a node to the cluster\")\n self.cluster.set_cassandra_dir(cassandra_version=version)\n nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)))\n nnode.start(no_wait=False)\n nnode.watch_log_for(\"Bootstrap completed!\")\n debug(\"node should be up, but sleeping a bit to ensure...\")\n time.sleep(15)\n self._check_values()\n self._check_counter_values()\n \n if mixed_version:\n debug('Successfully upgraded part of the cluster to %s' % version) \n else:\n debug('Successfully upgraded to %s' % version)",
"def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data",
"def request_shutdown(self, kernel_id, restart=False):",
"def upgrade_device(device, runtime):\n command = 'upgrade \"%s\" \"%s\"' % (device.udid, runtime.identifier)\n _run_command(command)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def upgradedb(self, args):\n upgrade_db(args.dbfile)",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def eurologic_kernel_version(self):\n return self._eurologic_kernel_version",
"def do_upgrade(env, ver, cursor):\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def RebootMachine(instance_id):\n # Terminate the EC2 instance.\n ec2 = ec2_manager.EC2Manager()\n\n logging.info('Rebooting machine with instance id \"%s\".', instance_id)\n ec2.RebootInstances([instance_id])\n\n # Update the corresponding client machine model.\n client_machine.SetMachineStatus(instance_id, enum.MACHINE_STATUS.RUNNING)\n client_machine.IncrementRetryCount(instance_id)",
"async def switch_dbinstance_hawith_options_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')",
"def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def _upgradeDB():\n\n # Set current DB name\n currentDbName = basedefs.DB_NAME\n\n # Before db upgrade we want to make a backup of existing db in case we fail\n # The backup is performed on local system, even for remote DB.\n dbBackupFile = tempfile.mkstemp(suffix=\".sql\", dir=basedefs.DIR_DB_BACKUPS)[1]\n logging.debug(\"backing up %s db to file %s\"%(basedefs.DB_NAME, dbBackupFile))\n\n # Run db backup\n utils.backupDB(basedefs.DB_NAME, getDbUser(), dbBackupFile, getDbHostName(), getDbPort())\n\n # Rename DB first. If it fails - stop with \"active connections\" error.\n # if upgrade passes fine, rename the DB back.\n DB_NAME_TEMP = \"%s_%s\" % (basedefs.DB_NAME, utils.getCurrentDateTime())\n utils.renameDB(basedefs.DB_NAME, DB_NAME_TEMP)\n currentDbName = DB_NAME_TEMP\n\n # if we're here, DB was renamed.\n # upgrade script must run from dbscripts dir\n currentDir = os.getcwd()\n os.chdir(basedefs.DIR_DB_SCRIPTS)\n\n try:\n\n logging.debug(\"upgrading db schema\")\n cmd = [\n os.path.join(basedefs.DIR_DB_SCRIPTS, basedefs.FILE_DB_UPGRADE_SCRIPT),\n \"-u\", getDbUser(),\n \"-d\", DB_NAME_TEMP,\n \"-s\", getDbHostName(),\n \"-p\", getDbPort(),\n ]\n\n # Run upgrade.sh script to update existing db\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_UPGRADE_FAILED)\n\n # Log the successful upgrade\n logging.debug('Successfully upgraded %s DB'%(basedefs.DB_NAME))\n controller.MESSAGES.append(\"DB was upgraded to latest version. previous DB backup can be found at %s\"%(dbBackupFile))\n\n # Go back to previous dir\n os.chdir(currentDir)\n\n # Upgrade was successful, so rename the DB back.\n utils.renameDB(DB_NAME_TEMP, basedefs.DB_NAME)\n currentDbName = basedefs.DB_NAME\n\n # Update rpm version in vdc options\n utils.updateVDCOption(\"ProductRPMVersion\", utils.getRpmVersion(basedefs.ENGINE_RPM_NAME))\n except:\n # Upgrade failed! we need to restore the old db\n logging.debug(\"DB upgrade failed, restoring it to a previous state. DB was backed up to %s\", dbBackupFile)\n\n # Delete the original DB.\n # TODO: handle the case of failure - it should not stop the flow, but should write to the log\n sqlQuery=\"DROP DATABASE %s\" % currentDbName\n utils.execRemoteSqlCommand(getDbUser(), \\\n getDbHostName(), \\\n getDbPort(), \\\n basedefs.DB_POSTGRES, \\\n sqlQuery, False, \\\n output_messages.ERR_DB_DROP)\n\n # Restore the DB\n utils.restoreDB(getDbUser(), getDbHostName(), getDbPort(), dbBackupFile)\n\n raise Exception(output_messages.ERR_DB_UPGRADE_FAILED)",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def modify_instance_attribute(self, instance_id, attribute, value):\r\n # Allow a bool to be passed in for value of disableApiTermination\r\n if attribute == 'disableApiTermination':\r\n if isinstance(value, bool):\r\n if value:\r\n value = 'true'\r\n else:\r\n value = 'false'\r\n params = {'InstanceId' : instance_id,\r\n 'Attribute' : attribute,\r\n 'Value' : value}\r\n return self.get_status('ModifyInstanceAttribute', params, verb='POST')",
"def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))",
"def reboot():\n if not required():\n return \"Kernel reboot not required\"\n cmd_str = 'shutdown -r +1 \"Server is going down for kernel upgrade\"'\n Popen([cmd_str], shell=True, stdin=None,\n stdout=None, stderr=None, close_fds=True)\n return cmd_str",
"def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. Try again after the upgrade is completed.\"))",
"def reboot_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_status('RebootInstances', params)",
"def terminate_volumes(db, context, instance_id):\n volume_api = volume.API()\n for bdm in db.block_device_mapping_get_all_by_instance(context,\n instance_id):\n #LOG.debug(_(\"terminating bdm %s\") % bdm)\n if bdm['volume_id'] and bdm['delete_on_termination']:\n volume_api.delete(context, bdm['volume_id'])\n db.block_device_mapping_destroy(context, bdm['id'])",
"def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")",
"def terminate_instance_in_asg(instance_id):\n if not app_config['DRY_RUN']:\n logger.info('Terminating ec2 instance in ASG {}...'.format(instance_id))\n try:\n response = client.terminate_instance_in_auto_scaling_group(\n InstanceId=instance_id,\n ShouldDecrementDesiredCapacity=True\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.ok:\n logger.info('Termination signal for instance is successfully sent.')\n else:\n logger.info('Termination signal for instance has failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n raise Exception('Termination of instance failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n\n except client.exceptions.ClientError as e:\n if 'DryRunOperation' not in str(e):\n raise",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def update_broker(AutoMinorVersionUpgrade=None, BrokerId=None, Configuration=None, EngineVersion=None, Logs=None):\n pass",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def downgrade(self, revision):\n alembic.command.downgrade(self.alembic_config(), revision)",
"def set_attribute(\n attribute,\n attribute_value,\n instance_name=None,\n instance_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n attribute_list = [\n \"instanceType\",\n \"kernel\",\n \"ramdisk\",\n \"userData\",\n \"disableApiTermination\",\n \"instanceInitiatedShutdownBehavior\",\n \"rootDeviceName\",\n \"blockDeviceMapping\",\n \"productCodes\",\n \"sourceDestCheck\",\n \"groupSet\",\n \"ebsOptimized\",\n \"sriovNetSupport\",\n ]\n if not any((instance_name, instance_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: instance_name or\"\n \" instance_id.\"\n )\n if instance_name and instance_id:\n raise SaltInvocationError(\n \"Both instance_name and instance_id can not be specified in the same\"\n \" command.\"\n )\n if attribute not in attribute_list:\n raise SaltInvocationError(\n \"Attribute must be one of: {}.\".format(attribute_list)\n )\n try:\n if instance_name:\n instances = find_instances(\n name=instance_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n filters=filters,\n )\n if len(instances) != 1:\n raise CommandExecutionError(\n \"Found more than one EC2 instance matching the criteria.\"\n )\n instance_id = instances[0]\n attribute = conn.modify_instance_attribute(\n instance_id, attribute, attribute_value\n )\n if not attribute:\n return False\n return attribute\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return False",
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def test_rebuild_with_instance_in_stopped_state(self):\n # Initialize the VM to stopped state\n db.instance_update(self.context, self.inst.uuid,\n {\"vm_state\": vm_states.STOPPED})\n self.inst.vm_state = vm_states.STOPPED\n\n self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',\n lambda *a, **ka: True)\n\n self._rebuild(vm_states_is_stopped=True)\n\n # Check the vm state is reset to stopped\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['vm_state'], vm_states.STOPPED)",
"def modify_dbinstance_monitor(\n self,\n request: dds_20151201_models.ModifyDBInstanceMonitorRequest,\n ) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_monitor_with_options(request, runtime)",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def downgrade(self, version):\n return NotImplemented",
"def svr_kernel(name, kernel, epsilon=None, **kwargs):\n def _name(msg):\n return '%s.%s_%s' % (name, kernel, msg)\n\n hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)\n hp_space.update(_svr_hp_space(_name, epsilon))\n return scope.sklearn_SVR(**hp_space)",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def get_booted_kernel():\n try:\n return run(['/usr/bin/uname', '-r'])['stdout'].strip()\n except CalledProcessError as e:\n raise StopActorExecutionError(\n message='Unable to obtain release of the booted kernel.',\n details={'details': str(e), 'stderr': e.stderr}\n )",
"def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue"
] |
[
"0.713365",
"0.7097949",
"0.65763366",
"0.6048078",
"0.56803346",
"0.5636641",
"0.55429274",
"0.5415511",
"0.53714454",
"0.5316516",
"0.5281482",
"0.52180755",
"0.5102098",
"0.50040174",
"0.499213",
"0.49542388",
"0.49341667",
"0.48674172",
"0.4845689",
"0.4839142",
"0.48272142",
"0.4824538",
"0.48026252",
"0.477112",
"0.47707742",
"0.47630265",
"0.47553083",
"0.47503594",
"0.4742487",
"0.47246185",
"0.4720964",
"0.47063074",
"0.46855798",
"0.46849433",
"0.46758637",
"0.4671623",
"0.4661101",
"0.46582153",
"0.46518558",
"0.46464157",
"0.4629341",
"0.46221766",
"0.45972386",
"0.45969447",
"0.45960993",
"0.45649782",
"0.4551607",
"0.45503637",
"0.45361462",
"0.45238042",
"0.4509444",
"0.44977415",
"0.44950128",
"0.44823164",
"0.44705537",
"0.44632745",
"0.44631416",
"0.44348088",
"0.4422883",
"0.44182605",
"0.44134474",
"0.44128",
"0.44034097",
"0.43959242",
"0.43942264",
"0.43893892",
"0.43877074",
"0.4382195",
"0.4380125",
"0.43737435",
"0.43709952",
"0.43638647",
"0.43624297",
"0.4356412",
"0.43313396",
"0.4331092",
"0.43278086",
"0.43174165",
"0.43158725",
"0.43016684",
"0.42739943",
"0.42721885",
"0.42708334",
"0.4266886",
"0.42453477",
"0.4235935",
"0.42344356",
"0.42287815",
"0.42231098",
"0.4220793",
"0.42100167",
"0.42027402",
"0.419156",
"0.4169955",
"0.41693267",
"0.4158953",
"0.41468254",
"0.41467878",
"0.4138803",
"0.4137213"
] |
0.79366666
|
0
|
When you call the UpgradeDBInstanceKernelVersion operation, the instance must be in the Running state. > The UpgradeDBInstanceKernelVersion operation applies to replica set and sharded cluster instances, but not to standalone instances. > The instance is restarted once during the upgrade. Call this operation during off-peak hours.
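As a rough illustration of the constraints described above, the sketch below shows how the operation might be invoked from Python. It assumes the usual Alibaba Cloud SDK package layout (alibabacloud_dds20151201 / alibabacloud_tea_openapi); the credentials, endpoint, and instance ID are placeholders rather than values taken from this record, and only the DBInstanceId parameter is drawn from the request model shown here.

```python
# Minimal sketch (not authoritative SDK documentation): call
# UpgradeDBInstanceKernelVersion on a running replica set or sharded
# cluster instance. Credentials, endpoint, and the instance ID are
# placeholders.
from alibabacloud_dds20151201.client import Client as DdsClient
from alibabacloud_dds20151201 import models as dds_20151201_models
from alibabacloud_tea_openapi import models as open_api_models

config = open_api_models.Config(
    access_key_id='<access-key-id>',          # placeholder credential
    access_key_secret='<access-key-secret>',  # placeholder credential
    endpoint='mongodb.aliyuncs.com',           # assumed service endpoint
)
client = DdsClient(config)

request = dds_20151201_models.UpgradeDBInstanceKernelVersionRequest(
    dbinstance_id='dds-bp1xxxxxxxxxxxxx',      # placeholder instance ID
)
# The instance is restarted once during the upgrade, so schedule the call
# for off-peak hours.
response = client.upgrade_dbinstance_kernel_version(request)
print(response.body)
```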
|
async def upgrade_dbinstance_kernel_version_async(
self,
request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,
) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:
runtime = util_models.RuntimeOptions()
return await self.upgrade_dbinstance_kernel_version_with_options_async(request, runtime)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
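For context, one plausible way a record carrying this "triplet" objective metadata could be consumed is as (query, positive document, negative) triplets for a contrastive objective. The sentence-transformers usage below is an assumption made purely for illustration; the record itself does not name a training library, and the field values shown are abbreviated stand-ins for the full query, document, and negatives fields of this row.

```python
# Hypothetical consumption sketch for the triplet objective named in the
# metadata above: pair the query with its positive document against each
# hard negative. Library choice and model name are assumptions.
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, SentenceTransformer, losses

query = "When you call the UpgradeDBInstanceKernelVersion operation, ..."   # query field (abbreviated)
positive = "async def upgrade_dbinstance_kernel_version_async(self, ..."     # document field (abbreviated)
negatives = [
    "def upgrade_dbinstance_kernel_version(self, ...",                       # entries from the negatives list (abbreviated)
    "def upgrade_dbinstance_kernel_version_with_options(self, ...",
]

# One training example per (query, positive, negative) triplet.
train_examples = [InputExample(texts=[query, positive, neg]) for neg in negatives]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)

model = SentenceTransformer("all-MiniLM-L6-v2")   # assumption: any bi-encoder base model
train_loss = losses.TripletLoss(model=model)
model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1)
```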
|
[
"def upgrade_dbinstance_kernel_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_kernel_version_with_options(request, runtime)",
"def upgrade_dbinstance_kernel_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"async def upgrade_dbinstance_kernel_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceKernelVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceKernelVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceKernelVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceKernelVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade_dbinstance_engine_version(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return self.upgrade_dbinstance_engine_version_with_options(request, runtime)",
"def upgrade_kernel(**kwargs):\n execute(\"upgrade_kernel_node\", env.host_string, **kwargs)",
"def upgrade_kernel():\n execute(\"upgrade_kernel_node\", env.host_string)",
"def update_rds_db_instance(RdsDbInstanceArn=None, DbUser=None, DbPassword=None):\n pass",
"async def upgrade_dbinstance_engine_version_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n runtime = util_models.RuntimeOptions()\n return await self.upgrade_dbinstance_engine_version_with_options_async(request, runtime)",
"def restart_dbinstance(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return self.restart_dbinstance_with_options(request, runtime)",
"def restart_kernel(self, kernel_id, now=False):",
"def RebootInstance(self, instance):\n raise HypervisorError(\"The chroot manager doesn't implement the\"\n \" reboot functionality\")",
"def upgrade_dbinstance_engine_version_with_options(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n self.call_api(params, req, runtime)\n )",
"def update_notebook_instance(NotebookInstanceName=None, InstanceType=None, RoleArn=None, LifecycleConfigName=None, DisassociateLifecycleConfig=None, VolumeSizeInGB=None, DefaultCodeRepository=None, AdditionalCodeRepositories=None, AcceleratorTypes=None, DisassociateAcceleratorTypes=None, DisassociateDefaultCodeRepository=None, DisassociateAdditionalCodeRepositories=None):\n pass",
"def update_instance(InstanceId=None, LayerIds=None, InstanceType=None, AutoScalingType=None, Hostname=None, Os=None, AmiId=None, SshKeyName=None, Architecture=None, InstallUpdatesOnBoot=None, EbsOptimized=None, AgentVersion=None):\n pass",
"def kernel_version(self, kernel_version):\n\n self._kernel_version = kernel_version",
"def restart_dbinstance_with_options(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n self.call_api(params, req, runtime)\n )",
"def shutdown_kernel(self, kernel_id, now=False, restart=False):",
"async def restart_dbinstance_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n runtime = util_models.RuntimeOptions()\n return await self.restart_dbinstance_with_options_async(request, runtime)",
"async def upgrade_dbinstance_engine_version_with_options_async(\n self,\n request: dds_20151201_models.UpgradeDBInstanceEngineVersionRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.UpgradeDBInstanceEngineVersionResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.engine_version):\n query['EngineVersion'] = request.engine_version\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='UpgradeDBInstanceEngineVersion',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.UpgradeDBInstanceEngineVersionResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def reboot_instance(InstanceId=None):\n pass",
"def upgrade_kernel_all(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n execute('create_installer_repo')\n execute('create_install_repo', *tgzs)\n nodes = []\n kernel_ver = kwargs.get('version')\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n elif version == '14.04':\n if kernel_ver is None:\n kernel_ver='3.13.0-106'\n (package, os_type) = ('linux-image-'+kernel_ver+'-generic', 'ubuntu')\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'centoslinux')\n elif 'red hat' in dist.lower() and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes, **kwargs)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def reboot(self, instance):\n try:\n out, err = utils.execute('sudo', 'vzctl', 'restart',\n instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to restart container: %d' %\n instance['id'])",
"def switch_dbinstance_ha(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return self.switch_dbinstance_hawith_options(request, runtime)",
"def reboot_instance(instance_id):\n # Instantiate the service resource object\n ec2_resource = session.resource('ec2', region_name=region)\n try:\n # Reboot an instance\n response = ec2_resource.Instance(instance_id).reboot(DryRun=False)\n print(response)\n print(\"\\nSuccessfully rebooting instance: \", instance_id)\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"InvalidInstanceID.Malformed\":\n print(\"Error: Invalid instance id!!\")\n else:\n raise\n return",
"def restart_ec2_instance(client, instance_id):\n\n response = client.reboot_instances(\n InstanceIds=[instance_id],\n )\n return response",
"def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')",
"def get_kernel_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def upgrade_kernel_all(reboot='yes'):\n execute('pre_check')\n execute('create_install_repo')\n nodes = []\n with settings(host_string=env.roledefs['all'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n (package, os_type) = ('linux-image-3.13.0-34-generic', 'ubuntu')\n elif version == '14.04':\n (package, os_type) = ('linux-image-3.13.0-40-generic', 'ubuntu')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_nodes_to_upgrade_pkg(package, os_type, *env.roledefs['all'])\n if not nodes:\n print \"kernel is already of expected version\"\n return\n execute(upgrade_kernel_node, *nodes)\n if reboot == 'yes':\n node_list_except_build = list(nodes)\n if env.host_string in nodes:\n node_list_except_build.remove(env.host_string)\n reboot_nodes(*node_list_except_build)\n reboot_nodes(env.host_string)\n else:\n reboot_nodes(*nodes)",
"def switch_dbinstance_hawith_options(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n self.call_api(params, req, runtime)\n )",
"async def _upgrade_db(self) -> None:\n cur_version = await self._get_db_version()\n for n in range(cur_version + 1, sql_data.CUR_VERSION + 1):\n log.msg('Upgrading database to version %d' % n)\n if n in sql_data.SQL_UPGRADES:\n for command in sql_data.SQL_UPGRADES[n]:\n await self.operation(command)\n if cur_version != sql_data.CUR_VERSION:\n await self._set_db_version(sql_data.CUR_VERSION)",
"def safe_upgrade():\n goviewbe.upgrade_db(current_app)",
"def restart_kernel(self, now=False, **kw):",
"def upgrade_kernel_without_openstack(*tgzs, **kwargs):\n reboot = kwargs.get('reboot', 'yes')\n non_openstack_nodes = [node for node in env.roledefs['all'] if node not in env.roledefs['openstack']]\n execute('create_installer_repo')\n execute('create_install_repo_without_openstack', *tgzs)\n nodes = []\n with settings(host_string=env.roledefs['cfgm'][0], warn_only=True):\n dist, version, extra = get_linux_distro()\n\n if ('red hat' in dist.lower() or 'centos linux' in dist.lower()) and version.startswith('7'):\n (package, os_type) = ('kernel-3.10.0-327.10.1.el7.x86_64', 'redhat')\n else:\n raise RuntimeError(\"Unsupported platfrom (%s, %s, %s) for\"\n \" kernel upgrade.\" % (dist, version, extra))\n nodes = get_package_installed_info(package, os_type, *non_openstack_nodes)\n if not nodes['not_installed']:\n print \"Nodes are already booted with expected version\"\n return\n if nodes['installed']:\n print \"Nodes (%s) are already booted in expected \"\\\n \"kernel version\" % \", \".join(nodes['installed'])\n\n execute(upgrade_kernel_node, *nodes['not_installed'], **kwargs)\n if reboot == 'yes':\n if env.host_string in nodes:\n nodes.remove(env.host_string).append(env.host_string)\n reboot_nodes(*nodes['not_installed'])\n else:\n print \"WARNING: Reboot Skipped as reboot=False; \"\\\n \"Reboot manually to avoid misconfiguration\"",
"async def restart_dbinstance_with_options_async(\n self,\n request: dds_20151201_models.RestartDBInstanceRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.RestartDBInstanceResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='RestartDBInstance',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.RestartDBInstanceResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade(self, old_version, new_version):\n pass",
"def upgrade_kernel_node(*args):\n for host_string in args:\n with settings(host_string=host_string):\n dist, version, extra = get_linux_distro()\n print \"upgrading apparmor before upgrading kernel\"\n if version == '12.04':\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n elif version == '14.04':\n print \"Installing 3.13.0-40 kernel headers\"\n apt_install([\"linux-headers-3.13.0-40\",\n \"linux-headers-3.13.0-40-generic\"])\n print \"Upgrading the kernel to 3.13.0-40\"\n apt_install([\"linux-image-3.13.0-40-generic\",\n \"linux-image-extra-3.13.0-40-generic\"])",
"def shutdown_kernel(self, now=False, restart=False):",
"def _stop(self, instance):\n try:\n _, err = utils.execute('sudo', 'vzctl', 'stop', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError:\n raise exception.Error('Failed to stop %s' % instance['id'])\n\n # Update instance state\n try:\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.SHUTDOWN)\n except exception.DBError as err:\n LOG.error(err)\n raise exception.Error('Failed to update db for %s' % instance['id'])\n \n return True",
"def _sync_instance_power_state(self, context, db_instance, vm_power_state,\n use_slave=False):\n\n # We re-query the DB to get the latest instance info to minimize\n # (not eliminate) race condition.\n db_instance.refresh(use_slave=use_slave)\n db_power_state = db_instance.power_state\n vm_state = db_instance.vm_state\n\n if self.host != db_instance.host:\n # on the sending end of cloud-cloud _sync_power_state\n # may have yielded to the greenthread performing a live\n # migration; this in turn has changed the resident-host\n # for the VM; However, the instance is still active, it\n # is just in the process of migrating to another host.\n # This implies that the cloud source must relinquish\n # control to the cloud destination.\n LOG.info(_LI(\"During the sync_power process the \"\n \"instance has moved from \"\n \"host %(src)s to host %(dst)s\"),\n {'src': db_instance.host,\n 'dst': self.host},\n instance=db_instance)\n return\n elif db_instance.task_state is not None:\n # on the receiving end of cloud-cloud, it could happen\n # that the DB instance already report the new resident\n # but the actual VM has not showed up on the hypervisor\n # yet. In this case, let's allow the loop to continue\n # and run the state sync in a later round\n LOG.info(_LI(\"During sync_power_state the instance has a \"\n \"pending task (%(task)s). Skip.\"),\n {'task': db_instance.task_state},\n instance=db_instance)\n return\n\n orig_db_power_state = db_power_state\n if vm_power_state != db_power_state:\n LOG.info(_LI('During _sync_instance_power_state the DB '\n 'power_state (%(db_power_state)s) does not match '\n 'the vm_power_state from the hypervisor '\n '(%(vm_power_state)s). Updating power_state in the '\n 'DB to match the hypervisor.'),\n {'db_power_state': db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n # power_state is always updated from hypervisor to db\n db_instance.power_state = vm_power_state\n db_instance.save()\n db_power_state = vm_power_state\n\n # Note(maoy): Now resolve the discrepancy between vm_state and\n # vm_power_state. We go through all possible vm_states.\n if vm_state in (vm_states.BUILDING,\n vm_states.RESCUED,\n vm_states.RESIZED,\n vm_states.SUSPENDED,\n vm_states.ERROR):\n # TODO(maoy): we ignore these vm_state for now.\n pass\n elif vm_state == vm_states.ACTIVE:\n # The only rational power state should be RUNNING\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance shutdown by itself. Calling the \"\n \"stop API. 
Current vm_state: %(vm_state)s, \"\n \"current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # Note(maoy): here we call the API instead of\n # brutally updating the vm_state in the database\n # to allow all the hooks and checks to be performed.\n if db_instance.shutdown_terminate:\n self.compute_api.delete(context, db_instance)\n else:\n self.compute_api.stop(context, db_instance)\n except Exception:\n # Note(maoy): there is no need to propagate the error\n # because the same power_state will be retrieved next\n # time and retried.\n # For example, there might be another task scheduled.\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.SUSPENDED:\n LOG.warning(_LW(\"Instance is suspended unexpectedly. Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_power_state == power_state.PAUSED:\n # Note(maoy): a VM may get into the paused state not only\n # because the user request via API calls, but also\n # due to (temporary) external instrumentations.\n # Before the virt layer can reliably report the reason,\n # we simply ignore the state discrepancy. In many cases,\n # the VM state will go back to running after the external\n # instrumentation is done. See bug 1097806 for details.\n LOG.warning(_LW(\"Instance is paused unexpectedly. Ignore.\"),\n instance=db_instance)\n elif vm_power_state == power_state.NOSTATE:\n # Occasionally, depending on the status of the hypervisor,\n # which could be restarting for example, an instance may\n # not be found. Therefore just log the condition.\n LOG.warning(_LW(\"Instance is unexpectedly not found. Ignore.\"),\n instance=db_instance)\n elif vm_state == vm_states.STOPPED:\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Instance is not stopped. Calling \"\n \"the stop API. Current vm_state: %(vm_state)s,\"\n \" current task_state: %(task_state)s, \"\n \"original DB power_state: %(db_power_state)s, \"\n \"current VM power_state: %(vm_power_state)s\"),\n {'vm_state': vm_state,\n 'task_state': db_instance.task_state,\n 'db_power_state': orig_db_power_state,\n 'vm_power_state': vm_power_state},\n instance=db_instance)\n try:\n # NOTE(russellb) Force the stop, because normally the\n # cloud API would not allow an attempt to stop a stopped\n # instance.\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state == vm_states.PAUSED:\n if vm_power_state in (power_state.SHUTDOWN,\n power_state.CRASHED):\n LOG.warning(_LW(\"Paused instance shutdown by itself. 
Calling \"\n \"the stop API.\"), instance=db_instance)\n try:\n self.compute_api.force_stop(context, db_instance)\n except Exception:\n LOG.exception(_LE(\"error during stop() in \"\n \"sync_power_state.\"),\n instance=db_instance)\n elif vm_state in (vm_states.SOFT_DELETED,\n vm_states.DELETED):\n if vm_power_state not in (power_state.NOSTATE,\n power_state.SHUTDOWN):\n # Note(maoy): this should be taken care of periodically in\n # _cleanup_running_deleted_instances().\n LOG.warning(_LW(\"Instance is not (soft-)deleted.\"),\n instance=db_instance)",
"def shut_down_instance_if_swap_used():\n stats = statistics.report_statistics()\n swap_usage_total = stats.get('swap_usage_total', None)\n ram_usage_total = stats.get('ram_usage_total', None)\n\n logger.info('Checking swap and RAM usage...')\n\n if swap_usage_total and ram_usage_total:\n try:\n swap_usage_total = float(ram_usage_total)\n ram_usage_total = float(ram_usage_total)\n except:\n logger.error('Swap and RAM usage check failed during float() conversion')\n return\n\n if ram_usage_total > 70:\n if swap_usage_total > 10:\n # we're swapping very badly!\n logger.error('Swap and RAM usage is too high! Terminating instance')\n try:\n conn = boto.connect_ec2()\n instance_id = get_instance_metadata()['instance-id']\n conn.terminate_instances(instance_id, decrement_capacity=True)\n except Exception as e:\n logger.error('Failed to terminate instance, exception: %s' % str(e))",
"def downgrade_app_db(app, user, migration_version):\n ctx.logger.info(\n 'Downgrading %s DB to revision: %s', app.capitalize(),\n migration_version\n )\n run('db-migrate-down-to', app, user, migration_version)",
"def upgrade(self, version):\n try:\n version = int(version)\n except:\n if version != 'latest':\n self.logger.error('Unable to parse version \"{}\"'.format(version))\n return\n\n # check the current db version\n current_version = self.inspect()\n if current_version is None:\n self.logger.error('Unable to inspect your database. '\n 'Perhaps you need to run \\'jambi inpsect\\'?')\n return\n\n # get the migrations\n migrations = self.find_migrations()\n latest_version = migrations[-1][1] if any(migrations) else 0\n migrations = tuple(filter(lambda x: x[1] > current_version, migrations))\n\n if current_version > latest_version:\n self.logger.error('Your database version is higher than the '\n 'current database version. '\n '(current: {}, latest: {})'.format(current_version,\n latest_version))\n elif current_version == latest_version:\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # filter out migrations that are beyond the desired version\n if version == 'latest':\n version = latest_version\n migrations = tuple(filter(lambda x: x[1] <= version, migrations))\n if not any(migrations):\n self.logger.info('You are already up to date. '\n '(version: {})'.format(current_version))\n return\n\n # run the migrations\n self.logger.info('Now performing the migration to version {}...'.format(version))\n self.db.connect()\n with self.db.atomic():\n for n, v, m in migrations:\n self.logger.info('>>> [{}] Attempting...'.format(v))\n migrator = PostgresqlMigrator(self.db)\n upgrades = m.upgrade(migrator)\n migrate(*upgrades)\n self._set_version(v)\n self.logger.info('>>> [{}] Success!'.format(v))\n self.db.close()\n self.logger.info('Successfully migrated to version {}...'.format(version))\n return",
"def update_instances_os ( ec2_conn, vpc, base_name, restart = False ) :\n instances = get_all_vpc_instances( ec2_conn, vpc )\n status = ssh_call_vpc( ec2_conn, base_name, instances, \"sudo yum -y update\", True )\n if restart and status == 0 :\n for instance in instances :\n instance.reboot( )",
"def swap_elb_instances ( elb_conn,\n elb,\n new_instance_ids,\n swap_smoothly = True,\n terminate_old_instances = False,\n ec2_conn = None,\n cloudwatch_conn = None ) :\n old_instance_ids = []\n \n if elb.instances:\n old_instance_ids = [ elb_i.id for elb_i in elb.instances ]\n \n \"Sometimes aws does not return the existing instances\"\n if len( old_instance_ids ) == 0 :\n old_instance_ids = [ elb_i.instance_id for elb_i in elb.get_instance_health() ]\n \n print 'old instances'\n print old_instance_ids\n \n \n elb_conn.register_instances( elb.name, new_instance_ids )\n new_instances_started = True\n if swap_smoothly :\n for new_instance_id in new_instance_ids :\n inservice = wait_on_elb_instance( elb_conn, elb.name, new_instance_id )\n if not inservice :\n new_instances_started = False\n\n if len( old_instance_ids ) > 0 :\n elb_conn.deregister_instances( elb.name, old_instance_ids )\n\n if new_instances_started and terminate_old_instances :\n print \"terminating old instances\"\n remove_alarms_on_instances( cloudwatch_conn, old_instance_ids )\n ec2_conn.terminate_instances( old_instance_ids )\n\n return new_instances_started",
"def environment_needs_upgrade(self, db):\n\n return False",
"def deregister_rds_db_instance(RdsDbInstanceArn=None):\n pass",
"def swapdb(self, *args, **kwargs) -> NoReturn:\n raise RedisClusterException(\"SWAPDB is not supported in cluster mode\")",
"def waitForInstanceToRun(instance):\n while True:\n try:\n instance.update()\n break\n except EC2ResponseError:\n continue\n\n for trial in range(0, NUM_RETRY_ATTEMPTS):\n if instance.update() == u'running':\n break\n elif trial == NUM_RETRY_ATTEMPTS-1:\n raise RuntimeError(\"AWS instance failed to startup after %d \" \\\n \"re-checks\" % NUM_RETRY_ATTEMPTS)\n else:\n time.sleep(1)",
"def migration(self):\n nova_connection_destination = self.destination_connection.get_nova_connection(self.destination_region_name)\n cinder_connection_destination = self.destination_connection.get_cinder_connection(self.destination_region_name)\n self._make_migration()\n try:\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n except Exception:\n print(\"Can't found the instance\")\n return\n # TODO make the status in the right way\n while status != 'ACTIVE':\n status = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()['status']\n if status != 'ACTIVE' and status != 'BUILD':\n print(\"Can't launch the instance\")\n raise ValueError(\"Status is not ACTIVE or BUILD: Status = [{}]\".format(status))\n print(\"Instance launch\")\n\n disk_device_attached = False\n # TODO attach the disk in an other function\n while not disk_device_attached:\n disk_device_attached = True\n for storage_resource in self.resource_manager.storage_resource:\n if storage_resource.resource_created == {}:\n print(\"Can't attach the resource {}\".format(storage_resource))\n continue\n try:\n disk_information = cinder_connection_destination.connection.volumes.get(storage_resource.resource_created['id']).to_dict()\n except Exception as exception:\n print(\"Can't get the resource {}\".format(storage_resource))\n continue\n # TODO make an enum for disk_information\n if disk_information['status'] == 'available':\n attach_a_volume(nova_connection_destination, self.resource_manager.instance_resource.resource_created['id'], disk_information['id'])\n print(\"Disk {} attach to instance\".format(disk_information['id']))\n elif disk_information['status'] == 'error' or disk_information['status'] == 'error_deleting' or disk_information['status'] == 'error_backing-up' or disk_information['status'] == 'error_restoring' or disk_information['status'] == 'error_extending':\n print(\"Status error {} for the disk {}\".format(status, disk_information['id']))\n print(\"Disk information {}\".format(disk_information))\n continue\n elif disk_information['status'] != 'in-use':\n disk_device_attached = False\n\n information = nova_connection_destination.connection.servers.get(self.resource_manager.instance_resource.resource_created['id']).to_dict()\n print(\"Addresses\")\n print(json.dumps(information['addresses'], indent=4))",
"def upgrade_environment(self, db):\n\n pass",
"def upgrade_kernel_node(*args, **kwargs):\n for host_string in args:\n with settings(host_string=host_string):\n execute('create_install_repo_node', host_string)\n dist, version, extra = get_linux_distro()\n if version == '12.04':\n print \"upgrading apparmor before upgrading kernel\"\n apt_install([\"apparmor\"])\n print \"Installing 3.13.0-34 kernel headers\"\n apt_install([\"linux-headers-3.13.0-34\"])\n apt_install([\"linux-headers-3.13.0-34-generic\"])\n print \"Upgrading the kernel to 3.13.0-34\"\n apt_install([\"linux-image-3.13.0-34-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux 3.13.0-34-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif version == '14.04':\n if 'version' in kwargs:\n kernel_ver = kwargs.get('version')\n else:\n kernel_ver = \"3.13.0-106\"\n print \"Installing \"+kernel_ver+\" kernel headers\"\n apt_install([\"linux-headers-\"+kernel_ver,\n \"linux-headers-\"+kernel_ver+\"-generic\"])\n print \"Upgrading the kernel to \"+kernel_ver\n apt_install([\"linux-image-\"+kernel_ver+\"-generic\",\n \"linux-image-extra-\"+kernel_ver+\"-generic\"])\n default_grub='Advanced options for Ubuntu>Ubuntu, with Linux '+kernel_ver+'-generic'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'red hat' in dist.lower() and version.startswith('7'):\n print \"Upgrading RHEL kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='Red Hat Enterprise Linux Server (3.10.0-327.10.1.el7.x86_64) 7.2 (Maipo)'\n execute('set_grub_default_node', host_string, value=default_grub)\n elif 'centos linux' in dist.lower() and version.startswith('7'):\n print \"Upgrading Centos kernel to version 3.10.0-327.10.1\"\n pkg_install([\"kernel-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-tools-libs-3.10.0-327.10.1.el7.x86_64\",\n \"kernel-headers-3.10.0-327.10.1.el7.x86_64\"], disablerepo=False)\n default_grub='CentOS Linux (3.10.0-327.10.1.el7.x86_64) 7 (Core)'\n execute('set_grub_default_node', host_string, value=default_grub)",
"def shutdown_kernel(self, now=False, restart=False):\n pass",
"async def switch_dbinstance_ha_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n runtime = util_models.RuntimeOptions()\n return await self.switch_dbinstance_hawith_options_async(request, runtime)",
"def nfvi_cold_migrate_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('cold_migrate_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def delete_instance(db_instance):\n rds = boto3.client('rds')\n rds.delete_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n SkipFinalSnapshot=True\n )",
"def nfvi_resize_revert_instance(instance_uuid, callback, context=None):\n cmd_id = _compute_plugin.invoke_plugin('resize_revert_instance',\n instance_uuid, context,\n callback=callback)\n return cmd_id",
"def upgrade(self, instance_id, cpus=None, memory=None,\r\n nic_speed=None, public=True):\r\n package_items = self._get_package_items()\r\n item_id = []\r\n if cpus:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'cpus', cpus, public)})\r\n if memory:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'memory', memory)})\r\n if nic_speed:\r\n item_id.append({'id': self._get_item_id_for_upgrade(\r\n package_items, 'nic_speed', nic_speed)})\r\n\r\n order = {}\r\n order['complexType'] = \\\r\n 'SoftLayer_Container_Product_Order_Virtual_Guest_Upgrade'\r\n order['virtualGuests'] = [{'id': int(instance_id)}]\r\n order['prices'] = item_id\r\n order['properties'] = [{'name': 'MAINTENANCE_WINDOW',\r\n 'value': str(datetime.datetime.now())}]\r\n if cpus or memory or nic_speed:\r\n self.client['Product_Order'].verifyOrder(order)\r\n self.client['Product_Order'].placeOrder(order)\r\n return True\r\n return False",
"def upgrade_to_version(self, version, mixed_version=False, nodes=None):\n debug('Upgrading to ' + version)\n if not mixed_version:\n nodes = self.cluster.nodelist()\n\n for node in nodes:\n debug('Prepping node for shutdown: ' + node.name)\n node.flush()\n self._check_values()\n self._check_counter_values()\n \n for node in nodes:\n debug('Shutting down node: ' + node.name)\n time.sleep(.5)\n node.stop(wait_other_notice=False)\n\n if ENABLE_VNODES and version >= \"1.2\":\n self.cluster.set_configuration_options(values={'initial_token': None, 'num_tokens': 256})\n\n for node in nodes:\n debug('Upgrading node: ' + node.name)\n node.set_cassandra_dir(cassandra_version=version)\n node.start(wait_other_notice=True)\n time.sleep(.5)\n if not mixed_version:\n node.nodetool('upgradesstables')\n\n if ENABLE_VNODES and version >= \"1.2\" and not mixed_version:\n debug(\"Running shuffle\")\n self.node2.shuffle(\"create\")\n self.node2.shuffle(\"en\")\n\n for node in nodes:\n debug('Checking node: ' + node.name)\n if not mixed_version:\n self._write_values()\n self._check_values()\n\n self._increment_counter_value()\n time.sleep(0.5)\n self._check_counter_values()\n \n if not mixed_version:\n # Check we can bootstrap a new node on the upgraded cluster:\n debug(\"Adding a node to the cluster\")\n self.cluster.set_cassandra_dir(cassandra_version=version)\n nnode = new_node(self.cluster, remote_debug_port=str(2000+len(self.cluster.nodes)))\n nnode.start(no_wait=False)\n nnode.watch_log_for(\"Bootstrap completed!\")\n debug(\"node should be up, but sleeping a bit to ensure...\")\n time.sleep(15)\n self._check_values()\n self._check_counter_values()\n \n if mixed_version:\n debug('Successfully upgraded part of the cluster to %s' % version) \n else:\n debug('Successfully upgraded to %s' % version)",
"def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data",
"def request_shutdown(self, kernel_id, restart=False):",
"def upgrade_device(device, runtime):\n command = 'upgrade \"%s\" \"%s\"' % (device.udid, runtime.identifier)\n _run_command(command)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def terminate_instance(self):\n # connect to ec2\n try:\n ec2_region = [r for r in boto.ec2.regions() if r.name == self._region][0]\n except indexerror:\n print >> sys.stderr, 'unknown region: %s' % self._region\n exit(2)\n ec2_connection = ec2_region.connect()\n\n #import code; code.interact(local=locals())\n instances = reduce(list.__add__, [reservation.instances for reservation in ec2_connection.get_all_instances()])\n name_matches = [i for i in instances\n if i.tags.get('Name', None) == self._instance_name and i.state == 'running']\n\n if (not name_matches):\n raise ValueError('No instance found with name %s' % self._instance_name)\n elif len(name_matches) > 1:\n raise ValueError('Multiple instances found with name %s' % self._instance_name)\n\n instance = name_matches[0]\n\n ec2_connection.terminate_instances(instance_ids=[instance.id])",
"def eurologic_kernel_version(self):\n return self._eurologic_kernel_version",
"def upgradedb(self, args):\n upgrade_db(args.dbfile)",
"def upgrade_db(self):\n repo = Repository(meta.metadata, meta.Session)\n\n if len(self.args) > 1:\n repo.upgrade(self.args[1])\n else:\n repo.upgrade()",
"def do_upgrade(env, ver, cursor):\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))",
"def package_upgrade():\n\n if (do_action_package_upgrade('nova-common',\n do_openstack_upgrade,\n CONFIGS)):\n # we should restart the container scoped (subordinate) plugins after a\n # managed openstack upgrade see: BUG#1835557\n for rid in relation_ids('neutron-plugin'):\n neutron_plugin_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-ceilometer'):\n nova_ceilometer_joined(rid, remote_restart=True)\n for rid in relation_ids('nova-vgpu'):\n nova_vgpu_joined(rid, remote_restart=True)\n # NOTE(ajkavanagh) - if unit is paused (usually true for managed\n # upgrade) then the config_changed() function is a no-op\n config_changed()",
"def _start(self, instance):\n try:\n # Attempt to start the VE.\n # NOTE: The VE will throw a warning that the hostname is invalid\n # if it isn't valid. This is logged in LOG.error and is not\n # an indication of failure.\n _, err = utils.execute('sudo', 'vzctl', 'start', instance['id'])\n if err:\n LOG.error(err)\n except ProcessExecutionError as err:\n LOG.error(err)\n raise exception.Error('Failed to start %d' % instance['id'])\n\n # Set instance state as RUNNING\n db.instance_set_state(context.get_admin_context(),\n instance['id'],\n power_state.RUNNING)\n return True",
"def RebootMachine(instance_id):\n # Terminate the EC2 instance.\n ec2 = ec2_manager.EC2Manager()\n\n logging.info('Rebooting machine with instance id \"%s\".', instance_id)\n ec2.RebootInstances([instance_id])\n\n # Update the corresponding client machine model.\n client_machine.SetMachineStatus(instance_id, enum.MACHINE_STATUS.RUNNING)\n client_machine.IncrementRetryCount(instance_id)",
"async def switch_dbinstance_hawith_options_async(\n self,\n request: dds_20151201_models.SwitchDBInstanceHARequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.SwitchDBInstanceHAResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.node_id):\n query['NodeId'] = request.node_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.role_ids):\n query['RoleIds'] = request.role_ids\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n if not UtilClient.is_unset(request.switch_mode):\n query['SwitchMode'] = request.switch_mode\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='SwitchDBInstanceHA',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.SwitchDBInstanceHAResponse(),\n await self.call_api_async(params, req, runtime)\n )",
"def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')",
"def down(self, arguments):\n force = arguments['--force']\n\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n if not force and vmrun.installedTools():\n stopped = vmrun.stop()\n else:\n stopped = vmrun.stop(mode='hard')\n if stopped is None:\n puts_err(colored.red(\"Not stopped\", vmrun))\n else:\n puts_err(colored.green(\"Stopped\", vmrun))",
"def upgrade_schema():\n\n db_version = get_db_version()\n try:\n while db_version < CURRENT_DATABASE_VERSION:\n db_version += 1\n upgrade_script = 'upgrade_to_'+str(db_version)\n globals()[upgrade_script]()\n except KeyError as e:\n logging.exception('Attempted to upgrade using script that does not exist: {}'.format(e))\n sys.exit(1)\n except Exception as e:\n logging.exception('Incremental upgrade of db failed')\n sys.exit(1)\n else:\n config.db.singletons.update_one({'_id': 'version'}, {'$set': {'database': CURRENT_DATABASE_VERSION}})\n sys.exit(0)",
"def restart(self):\n self.km.restart_kernel(now=True)",
"def reboot_node(self, node):\n params = {'Action': 'RebootInstances'}\n params.update(self._pathlist('InstanceId', [node.id]))\n res = self.connection.request(self.path, params=params).object\n return self._get_boolean(res)",
"def _upgradeDB():\n\n # Set current DB name\n currentDbName = basedefs.DB_NAME\n\n # Before db upgrade we want to make a backup of existing db in case we fail\n # The backup is performed on local system, even for remote DB.\n dbBackupFile = tempfile.mkstemp(suffix=\".sql\", dir=basedefs.DIR_DB_BACKUPS)[1]\n logging.debug(\"backing up %s db to file %s\"%(basedefs.DB_NAME, dbBackupFile))\n\n # Run db backup\n utils.backupDB(basedefs.DB_NAME, getDbUser(), dbBackupFile, getDbHostName(), getDbPort())\n\n # Rename DB first. If it fails - stop with \"active connections\" error.\n # if upgrade passes fine, rename the DB back.\n DB_NAME_TEMP = \"%s_%s\" % (basedefs.DB_NAME, utils.getCurrentDateTime())\n utils.renameDB(basedefs.DB_NAME, DB_NAME_TEMP)\n currentDbName = DB_NAME_TEMP\n\n # if we're here, DB was renamed.\n # upgrade script must run from dbscripts dir\n currentDir = os.getcwd()\n os.chdir(basedefs.DIR_DB_SCRIPTS)\n\n try:\n\n logging.debug(\"upgrading db schema\")\n cmd = [\n os.path.join(basedefs.DIR_DB_SCRIPTS, basedefs.FILE_DB_UPGRADE_SCRIPT),\n \"-u\", getDbUser(),\n \"-d\", DB_NAME_TEMP,\n \"-s\", getDbHostName(),\n \"-p\", getDbPort(),\n ]\n\n # Run upgrade.sh script to update existing db\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_DB_UPGRADE_FAILED)\n\n # Log the successful upgrade\n logging.debug('Successfully upgraded %s DB'%(basedefs.DB_NAME))\n controller.MESSAGES.append(\"DB was upgraded to latest version. previous DB backup can be found at %s\"%(dbBackupFile))\n\n # Go back to previous dir\n os.chdir(currentDir)\n\n # Upgrade was successful, so rename the DB back.\n utils.renameDB(DB_NAME_TEMP, basedefs.DB_NAME)\n currentDbName = basedefs.DB_NAME\n\n # Update rpm version in vdc options\n utils.updateVDCOption(\"ProductRPMVersion\", utils.getRpmVersion(basedefs.ENGINE_RPM_NAME))\n except:\n # Upgrade failed! we need to restore the old db\n logging.debug(\"DB upgrade failed, restoring it to a previous state. DB was backed up to %s\", dbBackupFile)\n\n # Delete the original DB.\n # TODO: handle the case of failure - it should not stop the flow, but should write to the log\n sqlQuery=\"DROP DATABASE %s\" % currentDbName\n utils.execRemoteSqlCommand(getDbUser(), \\\n getDbHostName(), \\\n getDbPort(), \\\n basedefs.DB_POSTGRES, \\\n sqlQuery, False, \\\n output_messages.ERR_DB_DROP)\n\n # Restore the DB\n utils.restoreDB(getDbUser(), getDbHostName(), getDbPort(), dbBackupFile)\n\n raise Exception(output_messages.ERR_DB_UPGRADE_FAILED)",
"def task_upgrade(self):\n with settings(user=self.serviceUser):\n self.update()\n run(\"~/virtualenv/bin/trac-admin {}/trac-env upgrade\".format(self.configDir))\n run(\"~/virtualenv/bin/trac-admin {}/trac-env wiki upgrade\".format(self.configDir))\n\n self.task_restart()",
"def modify_instance_attribute(self, instance_id, attribute, value):\r\n # Allow a bool to be passed in for value of disableApiTermination\r\n if attribute == 'disableApiTermination':\r\n if isinstance(value, bool):\r\n if value:\r\n value = 'true'\r\n else:\r\n value = 'false'\r\n params = {'InstanceId' : instance_id,\r\n 'Attribute' : attribute,\r\n 'Value' : value}\r\n return self.get_status('ModifyInstanceAttribute', params, verb='POST')",
"def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n lh.log_cmd(\" \".join([str(arg) for arg in args]))\n self.upgradeprocess = psutil.Popen(\n args,\n # stdout=subprocess.PIPE,\n # stdin=subprocess.PIPE,\n # stderr=subprocess.PIPE,\n universal_newlines=True,\n )\n print(\"Upgrade commander has PID:\" + str(self.upgradeprocess.pid))",
"def reboot():\n if not required():\n return \"Kernel reboot not required\"\n cmd_str = 'shutdown -r +1 \"Server is going down for kernel upgrade\"'\n Popen([cmd_str], shell=True, stdin=None,\n stdout=None, stderr=None, close_fds=True)\n return cmd_str",
"def reboot_instances(self, instance_ids=None):\r\n params = {}\r\n if instance_ids:\r\n self.build_list_params(params, instance_ids, 'InstanceId')\r\n return self.get_status('RebootInstances', params)",
"def _check_upgrade(dbapi, host_obj=None):\n is_upgrading, upgrade = is_upgrade_in_progress(dbapi)\n if is_upgrading:\n if host_obj:\n if host_obj.created_at > upgrade.created_at:\n LOG.info(\"New host %s created after upgrade, allow partition\" %\n host_obj.hostname)\n return\n\n raise wsme.exc.ClientSideError(\n _(\"ERROR: Disk partition operations are not allowed during a \"\n \"software upgrade. Try again after the upgrade is completed.\"))",
"def terminate_volumes(db, context, instance_id):\n volume_api = volume.API()\n for bdm in db.block_device_mapping_get_all_by_instance(context,\n instance_id):\n #LOG.debug(_(\"terminating bdm %s\") % bdm)\n if bdm['volume_id'] and bdm['delete_on_termination']:\n volume_api.delete(context, bdm['volume_id'])\n db.block_device_mapping_destroy(context, bdm['id'])",
"def upgrade(self):\n self.config.basedeltadir = os.path.join(const.BASESDIR, time.strftime(\"base_%Y.%m.%d-%Hh%Mm%S\"))\n logger.debug(\"Upgrading the container to create a base in {}\".format(self.config.basedeltadir))\n basedelta = os.path.join(self.containerpath, self.config.basedeltadir)\n os.makedirs(basedelta)\n self.config.command = \"upgrade\"\n self.start()\n self.container.wait('STOPPED', const.UPGRADE_TIMEOUT)\n if self.running:\n raise ContainerError(\"The container didn't stop successfully\")\n self.config.command = \"\"\n if os.path.isfile(os.path.join(basedelta, '.upgrade')):\n raise ContainerError(\"The upgrade didn't finish successfully\")",
"def terminate_instance_in_asg(instance_id):\n if not app_config['DRY_RUN']:\n logger.info('Terminating ec2 instance in ASG {}...'.format(instance_id))\n try:\n response = client.terminate_instance_in_auto_scaling_group(\n InstanceId=instance_id,\n ShouldDecrementDesiredCapacity=True\n )\n if response['ResponseMetadata']['HTTPStatusCode'] == requests.codes.ok:\n logger.info('Termination signal for instance is successfully sent.')\n else:\n logger.info('Termination signal for instance has failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n raise Exception('Termination of instance failed. Response code was {}. Exiting.'.format(response['ResponseMetadata']['HTTPStatusCode']))\n\n except client.exceptions.ClientError as e:\n if 'DryRunOperation' not in str(e):\n raise",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def update_broker(AutoMinorVersionUpgrade=None, BrokerId=None, Configuration=None, EngineVersion=None, Logs=None):\n pass",
"def pool_upgrade_with_fault(self, hosts, pool_id):\n # Verify pool status before upgrade\n expected_status = \"not started\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Enable fault-injection\n self.enable_disable_fault_injection(hosts, enable=True)\n\n # Pool upgrade\n result = run_pcmd(hosts, \"dmg pool upgrade {}\".format(pool_id))\n self.check_result(result)\n # Verify pool status during upgrade\n expected_status = \"in progress\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n # Verify pool status during upgrade\n expected_status = \"failed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)\n\n # Disable fault-injection\n self.enable_disable_fault_injection(hosts, enable=False)\n # Verify pool upgrade resume after removal of fault-injection\n expected_status = \"completed\"\n self.verify_pool_upgrade_status(pool_id, expected_status)",
"def downgrade(self, revision):\n alembic.command.downgrade(self.alembic_config(), revision)",
"def set_attribute(\n attribute,\n attribute_value,\n instance_name=None,\n instance_id=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n filters=None,\n):\n conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)\n\n attribute_list = [\n \"instanceType\",\n \"kernel\",\n \"ramdisk\",\n \"userData\",\n \"disableApiTermination\",\n \"instanceInitiatedShutdownBehavior\",\n \"rootDeviceName\",\n \"blockDeviceMapping\",\n \"productCodes\",\n \"sourceDestCheck\",\n \"groupSet\",\n \"ebsOptimized\",\n \"sriovNetSupport\",\n ]\n if not any((instance_name, instance_id)):\n raise SaltInvocationError(\n \"At least one of the following must be specified: instance_name or\"\n \" instance_id.\"\n )\n if instance_name and instance_id:\n raise SaltInvocationError(\n \"Both instance_name and instance_id can not be specified in the same\"\n \" command.\"\n )\n if attribute not in attribute_list:\n raise SaltInvocationError(\n \"Attribute must be one of: {}.\".format(attribute_list)\n )\n try:\n if instance_name:\n instances = find_instances(\n name=instance_name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile,\n filters=filters,\n )\n if len(instances) != 1:\n raise CommandExecutionError(\n \"Found more than one EC2 instance matching the criteria.\"\n )\n instance_id = instances[0]\n attribute = conn.modify_instance_attribute(\n instance_id, attribute, attribute_value\n )\n if not attribute:\n return False\n return attribute\n except boto.exception.BotoServerError as exc:\n log.error(exc)\n return False",
"def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")",
"def test_rebuild_with_instance_in_stopped_state(self):\n # Initialize the VM to stopped state\n db.instance_update(self.context, self.inst.uuid,\n {\"vm_state\": vm_states.STOPPED})\n self.inst.vm_state = vm_states.STOPPED\n\n self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',\n lambda *a, **ka: True)\n\n self._rebuild(vm_states_is_stopped=True)\n\n # Check the vm state is reset to stopped\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['vm_state'], vm_states.STOPPED)",
"def modify_dbinstance_monitor(\n self,\n request: dds_20151201_models.ModifyDBInstanceMonitorRequest,\n ) -> dds_20151201_models.ModifyDBInstanceMonitorResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_dbinstance_monitor_with_options(request, runtime)",
"def create_instance(db_instance, engine):\n rds = boto3.client('rds')\n rds.create_db_instance(\n DBInstanceIdentifier=db_instance.aws_instance_identifier,\n AllocatedStorage=5,\n DBName=db_instance.db_name,\n Engine=engine,\n # General purpose SSD\n StorageType='gp2',\n\n # can't encrypt t2\n # StorageEncrypted=True,\n\n AutoMinorVersionUpgrade=True,\n # TODO Set this to true?\n MultiAZ=False,\n MasterUsername=db_instance.master_username,\n MasterUserPassword=db_instance.master_password,\n PubliclyAccessible=True,\n DBInstanceClass='db.t2.micro')",
"def downgrade(self, version):\n return NotImplemented",
"def svr_kernel(name, kernel, epsilon=None, **kwargs):\n def _name(msg):\n return '%s.%s_%s' % (name, kernel, msg)\n\n hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)\n hp_space.update(_svr_hp_space(_name, epsilon))\n return scope.sklearn_SVR(**hp_space)",
"def AddDatabaseVersion(\n parser, restrict_choices=True, hidden=False, support_default_version=True\n):\n # Section for engine-specific content.\n # This section is auto-generated by //cloud/storage_fe/sql/sync_engines.\n # Do not make manual edits.\n choices = [\n 'MYSQL_5_6',\n 'MYSQL_5_7',\n 'MYSQL_8_0',\n 'POSTGRES_9_6',\n 'POSTGRES_10',\n 'POSTGRES_11',\n 'POSTGRES_12',\n 'POSTGRES_13',\n 'POSTGRES_14',\n 'POSTGRES_15',\n 'SQLSERVER_2017_EXPRESS',\n 'SQLSERVER_2017_WEB',\n 'SQLSERVER_2017_STANDARD',\n 'SQLSERVER_2017_ENTERPRISE',\n 'SQLSERVER_2019_EXPRESS',\n 'SQLSERVER_2019_WEB',\n 'SQLSERVER_2019_STANDARD',\n 'SQLSERVER_2019_ENTERPRISE',\n 'SQLSERVER_2022_EXPRESS',\n 'SQLSERVER_2022_WEB',\n 'SQLSERVER_2022_STANDARD',\n 'SQLSERVER_2022_ENTERPRISE',\n ]\n # End of engine-specific content.\n\n help_text_unspecified_part = (\n DEFAULT_INSTANCE_DATABASE_VERSION + ' is used.'\n if support_default_version\n else 'no changes occur.'\n )\n help_text = (\n 'The database engine type and versions. If left unspecified, '\n + help_text_unspecified_part\n + ' See the list of database versions at '\n + 'https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion.'\n )\n\n if restrict_choices:\n help_text += (\n ' Apart from listed major versions, DATABASE_VERSION also accepts'\n ' supported minor versions.'\n )\n\n parser.add_argument(\n '--database-version',\n required=False,\n default=DEFAULT_INSTANCE_DATABASE_VERSION\n if support_default_version\n else None,\n choices=_MajorVersionMatchList(choices) if restrict_choices else None,\n help=help_text,\n hidden=hidden,\n )",
"def get_booted_kernel():\n try:\n return run(['/usr/bin/uname', '-r'])['stdout'].strip()\n except CalledProcessError as e:\n raise StopActorExecutionError(\n message='Unable to obtain release of the booted kernel.',\n details={'details': str(e), 'stderr': e.stderr}\n )",
"def terminate_ow_instance(ow, ow_instance_id):\n log.info(\"terminate_ow_instance( %s )\", ow_instance_id)\n try:\n ow.stop_instance(InstanceId=ow_instance_id)\n except Exception, e:\n print(e)\n log.info(e)\n sys.exit()\n while True:\n data = ow.describe_instances(InstanceIds=[ow_instance_id])['Instances']\n raw = json.dumps(data)\n ow_instance_json = json.loads(raw)\n print(ow_instance_json[0]['InstanceId'], ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n if ow_instance_json[0]['Status'] == \"stopped\":\n print(ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n log.info(\"%s %s\", ow_instance_json[0]['InstanceId'],\n ow_instance_json[0]['Status'])\n response = ow.delete_instance(InstanceId=ow_instance_id)\n print(response)\n log.info(\"Delete instance = %s\", response)\n break\n else:\n time.sleep(60)\n continue"
] |
[
"0.7936817",
"0.70986426",
"0.6577163",
"0.60453767",
"0.56828487",
"0.5638934",
"0.55407673",
"0.54129595",
"0.53686017",
"0.5318697",
"0.5280949",
"0.5216302",
"0.5101547",
"0.5003827",
"0.49964812",
"0.49521586",
"0.4936471",
"0.48645243",
"0.48442766",
"0.48389387",
"0.4830036",
"0.4824042",
"0.48002774",
"0.47697324",
"0.47693932",
"0.4761148",
"0.47581944",
"0.47528934",
"0.47409526",
"0.4722575",
"0.471879",
"0.470798",
"0.4687996",
"0.4683015",
"0.46755865",
"0.46733037",
"0.4663096",
"0.4656019",
"0.46496236",
"0.46453547",
"0.46270946",
"0.46202233",
"0.45982322",
"0.45954692",
"0.4594078",
"0.45623887",
"0.45512185",
"0.4548221",
"0.45344153",
"0.4522321",
"0.45112482",
"0.4499695",
"0.44926792",
"0.44809604",
"0.44676617",
"0.44628978",
"0.44624385",
"0.44351092",
"0.44230637",
"0.44202343",
"0.44135216",
"0.44132912",
"0.44022372",
"0.439277",
"0.43927556",
"0.43918714",
"0.43888253",
"0.4380331",
"0.4379946",
"0.43727142",
"0.43695274",
"0.43623304",
"0.4360889",
"0.43551987",
"0.433321",
"0.4330988",
"0.4325143",
"0.4316159",
"0.43158963",
"0.42995682",
"0.42755616",
"0.4270044",
"0.42695895",
"0.42666578",
"0.4244219",
"0.4234227",
"0.42332804",
"0.42283377",
"0.42226395",
"0.4219987",
"0.4211163",
"0.4201772",
"0.41894937",
"0.41683677",
"0.41678956",
"0.4158611",
"0.41507742",
"0.41454425",
"0.41406077",
"0.41356564"
] |
0.71337795
|
1
|
Split the dataset into train and test
|
def split_data(X_data, y_data):
    # Assumes: from sklearn import cross_validation as cv (sklearn.model_selection in 0.18+)
    # Holds out 10% of the samples as a fixed, reproducible test split.
    return cv.train_test_split(X_data, y_data, test_size=0.1, random_state=0)
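
A minimal runnable sketch of the same split, assuming `cv` refers to scikit-learn's cross_validation module (renamed to model_selection in 0.18+); the toy arrays are only for illustration and are not part of the original snippet:

import numpy as np

try:
    from sklearn import cross_validation as cv   # scikit-learn < 0.18
except ImportError:
    from sklearn import model_selection as cv    # 0.18+ exposes the same train_test_split API

def split_data(X_data, y_data):
    # Hold out 10% of the samples; random_state=0 fixes the shuffle so the split is reproducible.
    return cv.train_test_split(X_data, y_data, test_size=0.1, random_state=0)

if __name__ == "__main__":
    X = np.arange(20).reshape(10, 2)   # toy feature matrix: 10 samples, 2 features
    y = np.arange(10)                  # toy labels
    X_train, X_test, y_train, y_test = split_data(X, y)
    print(X_train.shape, X_test.shape)  # -> (9, 2) (1, 2)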
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)",
"def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)",
"def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set",
"def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r",
"def split_data(data, test_size):\r\n ntest = int(round(len(data) * (1 - test_size)))+1\r\n \r\n train, test = data[:ntest], data[ntest:]\r\n \r\n return train,test",
"def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy",
"def split_test_and_train_data(self, test_size=0.3, view=0):\n X_train, X_test, y_train, y_test = train_test_split(self.data[view], self.class_data, test_size=test_size)\n return X_train, X_test, y_train, y_test",
"def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df",
"def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)",
"def train_val_test_split(data):\n raise NotImplementedError",
"def split_data(self,test=False):\n shuffle_index = torch.randperm(self.train_target.shape[0])\n load = shuffle_index.shape[0]\n train_input_shuffle = self.train_input[shuffle_index]\n train_target_shuffle = self.train_target[shuffle_index]\n train_classes_shuffle = self.train_classes[shuffle_index]\n index_train = self.index_for_equal_class(train_target_shuffle[:load//2])\n train_input = train_input_shuffle[index_train]\n train_target = train_target_shuffle[index_train]\n train_classes = train_classes_shuffle[index_train]\n if not test:\n index_test = self.index_for_equal_class( train_target_shuffle[load//2:]) + load//2\n test_input = train_input_shuffle[index_test]\n test_target = train_target_shuffle[index_test]\n test_classes = train_classes_shuffle[index_test]\n else:\n index_test = self.index_for_equal_class(self.test_target)\n test_input = self.test_input[index_test]\n test_target = self.test_target[index_test]\n test_classes = self.test_classes[index_test]\n train_input, mean, std = normalize(train_input)\n test_input, _, _ = normalize(test_input,mean,std)\n return train_input, train_target, train_classes ,test_input ,test_target ,test_classes",
"def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)",
"def split_data(dataset, test_size=0.5):\n shuffled_data = np.random.RandomState(seed=721).permutation(dataset)\n train_set = shuffled_data[: int(len(dataset) * (1 - test_size)), :]\n test_set = shuffled_data[int(len(dataset) * (1 - test_size)):, :]\n return train_set, test_set",
"def split_data(self):\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n\n return X_train, X_test, y_train, y_test",
"def train_test_split(df):\n training_size = int(len(df) * .67)\n test_size = int(len(df) - training_size)\n train, test = df[0:training_size], df[training_size:len(df)]\n return train, test",
"def split_dataset(dataset, test_size):\n train_data = dataset.skip(test_size).shuffle(SHUFFLE_BUFFER_SIZE)\n train_data = train_data.padded_batch(BATCH_SIZE)\n \n test_data = dataset.take(test_size)\n test_data = test_data.padded_batch(BATCH_SIZE)\n \n return train_data, test_data",
"def split_data(X, y, test_size, random_state):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test",
"def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(range(len(labels)),\n stratify=labels,\n random_state=parameters['seed'],\n test_size=parameters['validation_size'])\n return train_indices, val_indices",
"def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)",
"def splitData(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\n print(X_train)\n print(y_train)\n print(X_test)\n print(y_test)\n return X_train, X_test, y_train, y_test",
"def _divide_into_test_train(\n self, test_size: int, train_size: int\n ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n X_train, X_test, y_train, y_test = train_test_split(\n self.df.iloc[:, :-1],\n self.df.iloc[:, -1],\n test_size=test_size,\n train_size=train_size,\n )\n return X_train, X_test, y_train, y_test",
"def TrainTestSplit(self,X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, \n test_size=0.33,\n random_state=42)\n return X_train, X_test, y_train, y_test",
"def split_data(self, val_size=0.1, test_size=0.5):\n df = pd.read_csv(self.csv_path, delimiter=';')\n train, val = train_test_split(df, test_size=val_size)\n val, test = train_test_split(df, test_size=test_size)\n return train, val, test",
"def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data",
"def split_dataset(dataset, test_size):\r\n random.shuffle(dataset)\r\n \r\n rating_negativ = []\r\n rating_positiv = []\r\n \r\n for row in dataset:\r\n if int(row[1]) == 0:\r\n rating_negativ.append(row)\r\n elif int(row[1]) == 1:\r\n rating_positiv.append(row)\r\n\r\n random.shuffle(rating_positiv)\r\n random.shuffle(rating_negativ) \r\n \r\n neg_train_data, neg_val_data = train_test_split(rating_negativ, test_size=test_size)\r\n pos_train_data, pos_val_data = train_test_split(rating_positiv, test_size=test_size)\r\n \r\n train_data = neg_train_data + pos_train_data\r\n val_data = neg_val_data + pos_val_data\r\n \r\n random.shuffle(train_data)\r\n random.shuffle(val_data)\r\n \r\n return train_data, val_data",
"def train_test_split(data, validate_size=0.3):\r\n\r\n split = len(data) * (1 - validate_size)\r\n split = int(split)\r\n train = data[:split]\r\n validate = data[split:]\r\n\r\n return train, validate",
"def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test",
"def train_test_split(df, test_size=0.1):\n ntrn = int(round(len(df) * (1 - test_size)))\n\n X_train, y_train = _load_data(df.iloc[0:ntrn])\n X_test, y_test = _load_data(df.iloc[ntrn:])\n\n return (X_train, y_train), (X_test, y_test)",
"def make_split(data, target, test_size=0.3):\n train, test = train_test_split(data, test_size=test_size)\n x_train = train.drop(target, axis=1)\n y_train = train[target]\n x_test = test.drop(target, axis=1)\n y_test = test[target]\n return x_train, y_train, x_test, y_test",
"def train_test_split(x, y, test_pct):\n data = zip(x, y)\n train, test = split_data(data, 1 - test_pct)\n x_train, y_train = zip(*train)\n x_test, y_test = zip(*test)\n return x_train, y_train, x_test, y_test",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y",
"def train_test_set_split(dataset, dataset_name, test_size=0.1):\n train_indices_path = './' + dataset_name + '_train_indices(' + str(test_size) + ').txt'\n test_indices_path = './' + dataset_name + '_test_indices(' + str(test_size) + ').txt'\n try:\n train_indices = []\n test_indices = []\n file = open(train_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n train_indices.append(int(line[:-1]))\n file.close()\n file = open(test_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n test_indices.append(int(line[:-1]))\n file.close()\n train_labels = [dataset.targets[i] for i in train_indices]\n except FileNotFoundError:\n indices = np.arange(len(dataset))\n labels = np.array(dataset.targets)\n train_indices, test_indices, train_labels, _ = train_test_split(\n indices, labels, test_size=test_size, stratify=labels\n )\n file = open(train_indices_path, 'wt', encoding='utf-8')\n for i in train_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n file = open(test_indices_path, 'wt', encoding='utf-8')\n for i in test_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n\n train_set = torch.utils.data.Subset(dataset, indices=train_indices)\n test_set = torch.utils.data.Subset(dataset, indices=test_indices)\n return train_set, test_set, train_labels",
"def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test",
"def split_test_train(df, num_months_test = 1):\n train_set = df.iloc[0:len(df)-num_months_test]\n test_set = df.iloc[len(df)-num_months_test:len(df)]\n \n return train_set, test_set",
"def train_test_split(x, y, test_pct):\n\tdata = zip(x,y)\n\ttrain, test = split_data(data, 1 - test_pct)\n\tx_train, y_train = zip(*train)\n\tx_test, y_test = zip(*test)\n\treturn x_train, y_train, x_test, y_test",
"def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test",
"def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test",
"def split_train_test(X, Y, ratio=0.3):\n # Splitting the dataset into the Training set and Test set\n return train_test_split(X, Y, test_size = ratio, random_state = 0)",
"def data_split(self, test_size=0.2, stratify=None):\n return train_test_split(\n self, test_size=test_size, random_state=42, stratify=stratify\n )",
"def split(self, train_mask, test_mask):\n train = self.dataset.loc[train_mask]\n test = self.dataset.loc[test_mask]\n return PandasTrainTestSplit.from_dfs(train, test, self.fguide)",
"def train_test_split(df, random_state=42):\n if len(df) < 3:\n print('no bueno')\n train, test = train_test_split(df, test_size=.2, random_state=random_state)\n train, val = train_test_split(train, test_size=.2, random_state=random_state)\n return train, test, val",
"def split_to_train_test(split_ratio, input_data):\n\n data = input_data.drop_duplicates()\n data = data.sample(frac = 1)\n data = np.r_[data]\n rows, columns = data.shape\n a = int(rows*split_ratio)\n train_data = data[0: a]\n test_data = data[a: rows+1]\n\n return train_data, test_data",
"def split_dataset(df, test_size, seed):\r\n ncols = np.size(df, 1)\r\n X = df.iloc[:, range(0, ncols - 1)]\r\n Y = df.iloc[:, ncols - 1]\r\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\r\n y_train = get_dummies(y_train) # One-hot encoding\r\n y_test = get_dummies(y_test)\r\n return x_train, x_test, y_train, y_test",
"def train_val_test_split(df):\n from sklearn.model_selction import train_test_split\n train, test = train_test_split(df, train_size = 0.80, test_size=0.20,\n random_state = 42)\n train, val = train_test_split(train, train_size = 0.70, val_size=0.30)\n print(train.shape, val.shape, test.shape)\n\n return train, val, test",
"def split_test_train(data, target=\"class\", split=0.20):\n np.random.seed(42)\n\n X = data[[c for c in list(data.columns) if c != target]]\n # y = data[target].astype(\"int\")\n y = data[target].astype(\"category\")\n\n train, test = Data(X, y), None\n if split is not None or split > 0:\n splits = train_test_split(X, y, test_size=split, stratify=y, random_state=42)\n train, test = Data(splits[0], splits[2]), Data(splits[1], splits[3])\n\n return train, test",
"def split_data(df, test_size): \n\n X_train, X_test, y_train, y_test = train_test_split(df[[\"description_processed\", \"transaction_type\", \"transaction_account_type\"]],\n df['transaction_class'],\n test_size=test_size,\n shuffle=True,\n random_state=42)\n \n return X_train, X_test, y_train, y_test",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\n \n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y",
"def train_test_split_data(data, test_size=0.25):\n \n y = data['fraud'].values\n X = data.drop(columns=['fraud','description','name','org_desc','org_name','payee_name']).values\n \n return train_test_split(X, y, test_size=test_size, stratify=y, shuffle=True)",
"def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)",
"def divide_train_test(self, sentences, tags):\n logging.info('Dividindo dataset em 10 folds')\n kf = KFold(n_splits=10)\n train, test = [], []\n for train_index, test_index in kf.split(sentences):\n train.append(train_index)\n test.append(test_index)\n return train, test",
"def split_data(self):\n self.train, self.val, self.test_x, self.test_y = [], [], [], []\n train_size = self.horizon\n # This assumes all countries have the same length.\n # The minus two gives space for the validation and test sets as they will overshoot.\n k_folds = len(self.countries[0].data)//self.horizon - 2\n for _ in range(k_folds):\n tr, v, te_x, te_y = self.cross_validate(train_size)\n self.train.append(tr), self.val.append(v), self.test_x.append(te_x), self.test_y.append(te_y)\n train_size += self.horizon",
"def split_train_test_dev(self):\n for dir_name in (self.config.train_dir, self.config.dev_dir,\n self.config.test_dir):\n create_dir(dir_name)\n\n self.split_helper(self.config.parsed_train_file_pos, 'pos')\n self.split_helper(self.config.parsed_train_file_neg, 'neg')",
"def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y",
"def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y",
"def split_data(self):\n if not self.load_data:\n raise AttributeError('Preprocessor has not loaded any data.')\n \n # 3 - Find example counts for each set\n self.n_examples = self.data[0].shape[0]\n self.n_train = int(self.n_examples * self.train_ratio)\n self.n_val = int(self.n_examples * self.val_ratio)\n self.n_test = self.n_examples - self.n_train - self.n_val\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')\n if self.n_test < 0:\n raise ValueError('Train + validation ratios must bef < 1')\n\n # 4 - Separate data into train, test, val\n if isinstance(self.data[0], pd.DataFrame):\n logger.info('Dataset is in a dataframe.')\n self.isdataframe = True\n\n self.train_data = [self.data[0].iloc[:self.n_train],\n self.data[1].iloc[:self.n_train]]\n \n self.val_data = [self.data[0].iloc[self.n_train:self.n_val + self.n_train],\n self.data[1].iloc[self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0].iloc[self.n_val + self.n_train:],\n self.data[1].iloc[self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n else:\n self.isdataframe = False\n logger.info('Dataset is in a numpy array.')\n \n # If datasets are numpy array or sparse\n self.train_data = [self.data[0][:self.n_train],\n self.data[1][:self.n_train]]\n \n self.val_data = [self.data[0][self.n_train:self.n_val + self.n_train],\n self.data[1][self.n_train:self.n_val + self.n_train]]\n \n self.test_data = [self.data[0][self.n_val + self.n_train:],\n self.data[1][self.n_val + self.n_train:]]\n logger.info('Data was split into train, val, test.')\n \n assert(self.n_train == self.train_data[0].shape[0])\n assert(self.n_val == self.val_data[0].shape[0])\n assert(self.n_test == self.test_data[0].shape[0])\n \n # Free memory\n del self.data\n \n if self.save_sets:\n self.save_datasets()",
"def train_test_split(X, y, split = 0.20):\n \n import random\n \n lst = [i for i in range(len(X))]\n random.shuffle(lst)\n train_amount = int((1 - split) * len(X))\n\n train_samples_idx = lst[:train_amount]\n test_samples_idx = lst[train_amount:]\n \n X_train = [X[i] for i in train_samples_idx]\n X_test = [X[i] for i in test_samples_idx]\n \n y_train = [y[i] for i in train_samples_idx]\n y_test = [y[i] for i in test_samples_idx]\n \n return X_train, X_test, y_train, y_test",
"def __train_test_splits(self):\n # By default, our indices are just 0-n\n split_indices = list(range(len(self.data)))\n # If shuffling, use our shared Random instance to shuffle our indices before slicing\n if self.shuffle:\n np.random.shuffle(split_indices)\n # Regardless of shuffle, take the first self.train_proportion for training, and the last\n # 1 - self.train_proportion records as test\n train_n = int(self.train_proportion * len(self.data))\n training_indices = split_indices[:train_n]\n test_indices = split_indices[train_n:]\n return training_indices, test_indices",
"def split_train_test_data(total_data_df, frac):\n test_data_df = total_data_df.sample(frac=frac, random_state=1)\n train_data_df = total_data_df.loc[total_data_df.index.difference(test_data_df.index)]\n return train_data_df, test_data_df",
"def split_dataset(x_test, y_test, dev_ratio):\n test_size = len(x_test)\n print(test_size)\n dev_size = (int)(test_size * dev_ratio)\n print(dev_size)\n x_dev = x_test[:dev_size]\n x_test = x_test[dev_size:]\n y_dev = y_test[:dev_size]\n y_test = y_test[dev_size:]\n return x_test, y_test",
"def split_train_test(data, test_ratio):\n shuffled_index = np.random.permutation(len(data))\n test_set_size = int(len(data) * test_ratio)\n\n train_indices = shuffled_index[test_set_size:]\n test_indices = shuffled_index[:test_set_size]\n\n return data.iloc[train_indices], data.iloc[test_indices]",
"def my_train_test_split(act_my_data, act_test_size=0.5):\r\n act_train_df, act_test_df = train_test_split(act_my_data,\r\n test_size=act_test_size)\r\n return act_train_df, act_test_df",
"def read_in_and_split_data(iris_data):\n iris_data = datasets.load_iris()\n data = iris_data['data']\n targets = iris_data['target']\n train_data, test_data, train_targets, test_targets = train_test_split(data, targets, test_size=0.1) \n return (train_data, test_data, train_targets, test_targets)",
"def train_test_split(self, train_test_ratio=0.75, splitter=None, idx_train=None, idx_test=None):\n if splitter is not None:\n idx_test, idx_train = splitter()\n elif idx_train is not None:\n idx_train = np.sort(idx_train)\n elif idx_test is not None:\n idx_test = np.sort(idx_test)\n else: # sequential split\n n_train= int(self.X.shape[0]*train_test_ratio)\n idx_test = range(self.X.shape[0] - n_train)\n idx_train = range(self.X.shape[0] - n_train, self.X.shape[0])\n\n self.trainX, self.trainY = self.X[idx_train, :], self.Y[idx_train]\n self.testX, self.testY = self.X[idx_test, :], self.Y[idx_test]\n self.idx_train, self.idx_test = idx_train, idx_test",
"def split_dataset(X, Y, train_size=0.8):\n if train_size != 1.0:\n return train_test_split(\n X, Y,\n train_size=train_size,\n stratify=Y\n )\n else:\n X_, Y_ = shuffle(\n X, Y\n )\n return X_, [], Y_, []",
"def split_dataset(data_set, train_size, test_size):\n # Generate random indices without replacement, to make train and test sets disjoint\n rand_indices = np.random.choice(data_set.shape[0], train_size+test_size, replace=False)\n feature_end = data_set.shape[1] - 1\n output_location = feature_end\n feature_offset = var.ALGORITHM_INFO['feature_offset']\n\n # Define the training and testing matrices\n x_train = data_set[rand_indices[0:train_size], feature_offset:feature_end]\n y_train = data_set[rand_indices[0:train_size], output_location]\n x_test = data_set[rand_indices[train_size:train_size+test_size], feature_offset:feature_end]\n y_test = data_set[rand_indices[train_size:train_size+test_size], output_location]\n favorite_test = data_set[rand_indices[train_size:train_size+test_size], 0]\n\n # Normalize features, with maximum value in training set\n # as realistically, this would be the only possibility\n\n for ii in range(x_train.shape[1]):\n maxval = np.max(np.abs(x_train[:, ii]))\n if maxval > 0:\n x_train[:, ii] = np.divide(x_train[:, ii], maxval)\n x_test[:, ii] = np.divide(x_test[:, ii], maxval)\n\n\n # Add a column of ones; done after to avoid modifying entire data_set\n x_train = np.hstack((x_train, np.ones((x_train.shape[0], 1))))\n x_test = np.hstack((x_test, np.ones((x_test.shape[0], 1))))\n\n return (x_train, y_train), (x_test, y_test), favorite_test",
"def split_dataset(instances, labels, train_split=0.8):\n split = int(train_split * len(instances))\n train_data, train_labels = instances[:split], labels[:split]\n test_data, test_labels = instances[split:], labels[split:]\n\n return train_data, train_labels, test_data, test_labels",
"def split(self, test_size=0.25, random_state=None):\n self.train_index, self.test_index = ms.train_test_split(\n self.data.index, test_size=test_size, random_state=random_state)",
"def test_split_data():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset.tsv\")\n train, val, test = pid.split_data(data_file, datatype='sequence',\n problem_type='classification', num_classes=3)\n\n assert (len(train) == 210) and (len(val) == 45) and (len(test) == 45) and (len(train[0]) == 3)",
"def split_dataset(samples, ratio=0.8):\n nsamples = len(samples)\n num_train = int(ratio*nsamples)\n\n # shuffle samples\n shuffle(samples)\n\n trainset = samples[:num_train]\n testset = samples[num_train:]\n\n return trainset, testset",
"def split_dataset(X: np.array, y: np.array, ratio=0.8):\n '''split dataset to train data and valid data'''\n X_train = X[:int(X.shape[0] * ratio)]\n y_train = y[:int(y.shape[0] * ratio)]\n X_valid = X[int(X.shape[0] * ratio):]\n y_valid = y[int(y.shape[0] * ratio):]\n dataset = tuple([X_train, y_train, X_valid, y_valid])\n\n return dataset",
"def split_data_into_training_and_validation(self, data):\n training_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples))\n validation_dataset = self.get_data_from_indices(data, np.arange(self.num_training_samples,\n self.p.trainer.num_samples))\n return training_dataset, validation_dataset",
"def split_data(df, train_prop):\n # Create random Tensors to hold inputs and outputs, and wrap them in Variables\n train_df = df.sample(frac=train_prop)\n test_df = df.loc[~df.index.isin(train_df.index)]\n return train_df, test_df",
"def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)",
"def split_train_test(df_train, labels):\n n_train = np.shape(df_train)[0]\n X = {'train': [], 'holdout': []} # features\n Y = {'train': [], 'holdout': []} # labels\n p10 = int(0.1 * n_train)\n X['holdout'] = df_train.iloc[-p10:]\n Y['holdout'] = labels[-p10:]\n X['train'] = df_train.iloc[:(n_train - p10)]\n Y['train'] = labels[:(n_train - p10)]\n return X, Y",
"def _split_data(self):\n\n # Set training data\n self.train_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'train'),\n transform=self._transform()\n )\n self.classes = self.train_data.classes\n\n # Set validation data\n self.val_data = torchvision.datasets.ImageFolder(\n os.path.join(self.path, 'test'),\n transform=self._transform(train=False)\n )",
"def split_data(self):\n np.random.seed(seed=self.seed)\n indices = np.random.permutation(self.predictor_vars.shape[0])\n split_row = round(self.predictor_vars.shape[0] * self.train_split)\n train_idx, test_idx = indices[:split_row], indices[split_row:]\n self.predictor_vars_train, self.predictor_vars_test = (\n self.predictor_vars[train_idx, :],\n self.predictor_vars[test_idx, :],\n )\n self.response_var_train, self.response_var_test = (\n self.response_var[train_idx],\n self.response_var[test_idx],\n )",
"def train_test_split(df, frac):\n frac = round(len(df)*frac)\n train = df[:frac]\n test = df[frac:]\n\n return train, test",
"def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)",
"def split_data(dataset_x, dataset_y, split_ratio):\n num_examples = len(dataset_x)\n training_x = dataset_x[:int(num_examples*split_ratio)]\n training_y = dataset_y[:int(num_examples*split_ratio)]\n\n validation_x = dataset_x[int(num_examples*split_ratio): num_examples]\n validation_y = dataset_y[int(num_examples*split_ratio): num_examples]\n\n training_y = np.asarray(training_y, dtype='float32')\n validation_y = np.asarray(validation_y, dtype='float32')\n return training_x, training_y, validation_x, validation_y",
"def split_data(X:np.ndarray, y:np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n \n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)\n \n return X_train, X_val, y_train, y_val",
"def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# (i.e. ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test",
"def split_dataset(dataset, train_percentage, valid_percentage):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[:, :-1], dataset[:, -1],\n train_size=train_percentage + valid_percentage,\n test_size=1-(train_percentage + valid_percentage))\n\n valid_x = train_x[int(np.ceil(train_percentage * len(dataset))):]\n valid_y = train_y[int(np.ceil(train_percentage * len(dataset))):]\n\n return train_x, valid_x, test_x, train_y, valid_y, test_y",
"def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ",
"def split_dev(self):\n\t\tprint(\"Splitting test set into dev and test set\")\n\n\t\told_length = len(self.X[\"test\"])\n\t\tindices = list(range(old_length))\n\n\t\tnp.random.seed(0)\n\t\tnp.random.shuffle(indices)\n\t\t\n\t\tsplit = int(len(indices) * 0.5)\n\n\t\tsplit_indices = {\"test\": indices[:split], \"dev\": indices[split:]}\n\t\n\t\tfor dataset in (\"dev\", \"test\"):\n\t\t\tself.X[dataset] = [self.X[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.Y[dataset] = [self.Y[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.raw_documents[dataset] = [self.raw_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\tself.tokenized_documents[dataset] = [self.tokenized_documents[\"test\"][idx] for idx in split_indices[dataset]]\n\t\t\n\t\tprint(\"Split test set with\", old_length, \"samples into\", len(self.X[\"test\"]), \"/\", len(self.X[\"dev\"]), \"samples\")",
"def split_train_test(ratings):\r\n ratings = ratings.sample(frac=1).reset_index(drop=True)\r\n train_user_list = []\r\n train_item_list = []\r\n train_rating_list = []\r\n test_user_list = []\r\n test_item_list = []\r\n test_rating_list = []\r\n user_pool = set(ratings['userId'].unique())\r\n for idx in user_pool:\r\n flag = 0\r\n items = ratings[ratings['userId']==idx][['itemId','rating']]\r\n for i, row in items.iterrows():\r\n if flag == 0:\r\n test_user_list.append(int(idx))\r\n test_item_list.append(int(row['itemId']))\r\n test_rating_list.append(row['rating'])\r\n flag = 1\r\n else:\r\n train_user_list.append(int(idx))\r\n train_item_list.append(int(row['itemId']))\r\n train_rating_list.append(row['rating'])\r\n\r\n train = pd.DataFrame({'userId': train_user_list, 'itemId': train_item_list, 'rating': train_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n test = pd.DataFrame({'userId': test_user_list, 'itemId': test_item_list, 'rating': test_rating_list}, columns=['userId', 'itemId', 'rating'])\r\n return [train, test]\r\n \r\n\r\n \r\n #train, test = train_test_split(ratings, test_size=0.1, shuffle=True)\r\n #return [train, test]\r",
"def split_train_validation_test(x, y, split):\n\n nsamples = x.shape[0]\n\n if y.shape[0] != nsamples:\n raise Exception('in split_train_validation_test, x has shape {}'.format(x.shape) +\n 'but y has shape {}'.format(y.shape) +\n 'First dimensions do not match')\n\n # make sure split array sums to 1\n split = np.asarray(split)\n split = split / split.sum()\n\n nsamples_train = int(split[0] * nsamples)\n nsamples_valid = int(split[1] * nsamples)\n\n # create a set of randomly shuffled indices\n indices = np.random.permutation(nsamples)\n\n idx_train = indices[:nsamples_train]\n idx_valid = indices[nsamples_train:nsamples_train+nsamples_valid]\n idx_test = indices[nsamples_train+nsamples_valid:]\n\n x_train = x[idx_train]\n y_train = y[idx_train]\n\n x_valid = x[idx_valid]\n y_valid = y[idx_valid]\n\n x_test = x[idx_test]\n y_test = y[idx_test]\n\n return x_train, y_train, x_valid, y_valid, x_test, y_test",
"def split_data(train_split, src_dir, train_dir, test_dir, classes):\n for cls in classes:\n # get all dat files of this class\n data = get_instances_of_class(cls, src_dir)\n \n # how many of the data points are for training?\n train_count = round(len(data) * train_split / 100)\n \n # randomly choose indexes\n train_indexes = set()\n while len(train_indexes) < train_count:\n train_indexes.add(random.randrange(len(data)))\n \n # move all train_indexes to train_dir, others to test_dir\n COPY = lambda src, dst, filename:\\\n shutil.copy2(\n \"{}/{}\".format(src, data[i]),\n \"{}/{}\".format(dst, data[i])\n )\n \n for i in range(len(data)):\n if i in train_indexes:\n COPY(src_dir, train_dir, data[i])\n else:\n COPY(src_dir, test_dir, data[i])",
"def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set",
"def split_train_and_test_with_py_datasets(data_set, batch_size=cfg['batch_size'], test_size=0.2, num_works=4,\n pin_memory=True):\n num_dataset = len(data_set)\n indices = list(range(num_dataset))\n split = int(np.floor(test_size * num_dataset))\n\n train_idx, test_idx = indices[split:], indices[:split]\n train_sampler = SubsetRandomSampler(train_idx)\n test_sampler = SubsetRandomSampler(test_idx)\n\n train_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=train_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n test_loader = torch.utils.data.DataLoader(\n dataset=data_set, batch_size=batch_size, sampler=test_sampler, num_workers=num_works,\n pin_memory=pin_memory\n )\n\n return train_loader, test_loader",
"def get_train_test_split(dataset: SentenceComplexityDataset, n_test: float) -> Tuple[Subset, Subset]:\n # determine sizes\n test_size = round(n_test * len(dataset))\n train_size = len(dataset) - test_size\n # calculate the split\n train, test = random_split(dataset, [train_size, test_size],\n generator=torch.Generator().manual_seed(RANDOM_SPLIT_SEED))\n\n return train, test",
"def train_test_split(ratings):\n test = set(range(len(ratings))[::1000])\n train = sorted(set(range(len(ratings))) - test)\n test = sorted(test)\n return ratings.iloc[train], ratings.iloc[test]",
"def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):\n X_train = X[training_idxs]\n X_test = X[test_idxs]\n #X_val = X[val_idxs]\n\n y_train = y[training_idxs]\n y_test = y[test_idxs]\n #y_val = y[val_idxs]\n\n return X_train, X_test, y_train, y_test,",
"def data_split(X, y):\n folds = KFold(n_splits=SPLITS, shuffle=True, random_state=RANDOM_STATE)\n train_indices, validation_indices = list(folds.split(X))[-1][0], list(folds.split(X))[-1][1]\n\n X_train = X.iloc[train_indices]\n X_validation = X.iloc[validation_indices]\n\n y_train = y.iloc[train_indices]\n y_validation = y.iloc[validation_indices]\n\n return X_train, X_validation, y_train, y_validation",
"def unbalanced_split(dataset, test_size):\n\tprint(\"\\tSplitting data into *unbalanced* training and test sets\")\n\n\tdataset = dataset.drop(\"Date\", axis=1)\n\toutput = train_test_split(dataset.drop(\"Trend\", axis=1).values, dataset[\"Trend\"].values, test_size=test_size, random_state=RANDOM_STATE)\n\n\treturn output",
"def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test",
"def train_test_split(features, target, split_ts):\n\n # split features\n train_features = []\n test_features = []\n for feature in features:\n train_feature, test_feature = feature.split_after(split_ts)\n train_features.append(train_feature)\n test_features.append(test_feature)\n\n # split target\n train_target, test_target = target.split_after(split_ts)\n\n return (train_features, train_target, test_features, test_target)",
"def test_train_split(X, y, test_size):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n print(f\"You have {len(X_train)} training colors and {len(X_test)} test colors - test_size: {test_size*100}.\")\n return X_train, X_test, y_train, y_test",
"def testTrainSplit(self, feature, Class, test_size = 0.2, \n random_state = 0):\n # training and testing sets\n fTrain, fTest, cTrain, cTest = train_test_split( feature, Class,\n test_size = test_size, \n random_state = random_state)\n self.fTrain = fTrain\n self.fTest = fTest\n self.cTrain = cTrain\n self.cTest = cTest\n \n return fTrain, fTest, cTrain, cTest"
] |
[
"0.8802637",
"0.86079246",
"0.8466979",
"0.82975525",
"0.8284587",
"0.827557",
"0.8264588",
"0.81958085",
"0.81672466",
"0.8152023",
"0.8112103",
"0.8103609",
"0.8087945",
"0.80442524",
"0.8010122",
"0.7993666",
"0.7990225",
"0.79382634",
"0.79229766",
"0.79132396",
"0.7867503",
"0.7858341",
"0.78506684",
"0.78422886",
"0.7831858",
"0.78267497",
"0.779849",
"0.77778465",
"0.7767614",
"0.7764626",
"0.77641743",
"0.7742459",
"0.7741432",
"0.77244306",
"0.7713502",
"0.7712554",
"0.7692347",
"0.7692347",
"0.76900584",
"0.768765",
"0.76845974",
"0.76843345",
"0.76789194",
"0.76687014",
"0.76660436",
"0.76563346",
"0.76499796",
"0.7647627",
"0.76360595",
"0.7632988",
"0.7594364",
"0.7582324",
"0.75821966",
"0.7576125",
"0.75711876",
"0.75711876",
"0.75703996",
"0.756918",
"0.756139",
"0.75599927",
"0.7559903",
"0.7559897",
"0.7551557",
"0.7550842",
"0.75465155",
"0.7545277",
"0.7535122",
"0.7529493",
"0.7518956",
"0.7504039",
"0.75030637",
"0.7495576",
"0.7481899",
"0.74787456",
"0.74785596",
"0.74712056",
"0.74702924",
"0.7443507",
"0.74432784",
"0.74354637",
"0.74291885",
"0.7424859",
"0.7419746",
"0.74186337",
"0.7417499",
"0.7412313",
"0.7405067",
"0.740496",
"0.7404896",
"0.74011606",
"0.7390866",
"0.7386532",
"0.73851806",
"0.738227",
"0.73751986",
"0.7365329",
"0.7363048",
"0.73625207",
"0.73623216",
"0.73608875"
] |
0.78183466
|
26
|
Use features and result to train Support Vector Machine
|
def train(features, result):
clf = grid_search(result)
clf.fit(features, result)
return clf
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def trainModel( self, featureTrain, classTrain):",
"def trainNewModel():\n print \"Creating feature vectors for trainset...\"\n trainDependencies = getDependency(trainDepFilename)\n trainLabel, trainFeatureVectors = \\\n createFeatureVectors(trainFilename, trainDependencies)\n print \"Length of feature vector for trainset: %d\" \\\n % len(trainFeatureVectors[0])\n if not len(addTrainsetList) == 0:\n print \"Combining feature vectors of additional trainset...\"\n trainLabel, trainFeatureVectors = \\\n combineAdditionalTrainset(\n addTrainsetList, trainLabel, trainFeatureVectors)\n print \"Feature vectors of trainset created.\"\n SVMTrain(trainLabel, trainFeatureVectors, modelFilename)",
"def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list",
"def svm():",
"def train(self, features, labels):\n pass",
"def train():\n pass",
"def __trainLocal__(self, featureVals, targetVals):\n pass",
"def test_svm():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = r.randint(1, 10598)\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n\n # iris\n pres = \"Test pour le data set Iris (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Iris, quantum_instance)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func(pres, 15, 10, 3, True, Breast_cancer, quantum_instance)\n\n # digits (it's long so be careful)\n #pres = \"Test pour le data set Digits (difficile, classique)\"\n #test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n pres = \"Test pour le data set Wine (moyen, classique)\"\n test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func(pres, 25, 10, 2, True, Gaussian, quantum_instance)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func(pres, 10, 15, 14, True, Sequence, quantum_instance)",
"def buildAndTrain(trainingData):\n\tname = trainingData.drop(['count', 'casual', 'registered'], axis=1).columns\n\ttarget = trainingData['count'].values\n\tfeature = trainingData.drop(['count', 'casual', 'registered'], axis=1).values\n\t# feature scaling\n\tfeature_scaled = preprocessing.scale(feature)\n\t# 0.5 cross validate\n\tcv = cross_validation.ShuffleSplit(len(feature_scaled), n_iter=5, test_size=0.2, random_state=0)\n\t# build model, then training and get accuracy of it\n\tprint('\\n---------岭回归结果--------\\n')\n\tfor train, test in cv:\n\t\tregLR = linear_model.Ridge().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregLR.score(feature_scaled[train], target[train]),\n\t\t regLR.score(feature_scaled[test], target[test])))\n\tprint('\\n---------svm结果--------\\n')\n\tfor train, test in cv:\n\t\tregSvm = svm.SVR().fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregSvm.score(feature_scaled[test], target[test])))\n\tprint('\\n---------随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRF = RandomForestRegressor(n_estimators=100).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRF.score(feature_scaled[test], target[test])))\n\t# reduce some low correction feature\n\tfeatureReduced = trainingData.drop(['count', 'casual', 'registered', 'holiday', 'workingday', 'day'], axis=1).values\n\tfeatureReduced_scaled = preprocessing.scale(featureReduced)\n\tprint('\\n---------减少特征维度以避免过拟合后的随机森林结果--------\\n')\n\tfor train, test in cv:\n\t\tregRFImpr = RandomForestRegressor(n_estimators=100).fit(featureReduced_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFImpr.score(featureReduced_scaled[test], target[test])))\n\t# use grid search algorithm to improve random forest regression\n\tX_train, X_test, y_train, y_test = cross_validation.train_test_split(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tfeature_scaled, target, test_size=0.2, random_state=0)\n\ttuned_parameters = [{'n_estimators': [10,100,500], 'max_depth': [2,3,4,5,6,7,8,9,10]}]\n\tscores = ['r2']\n\n\tfor score in scores:\n\t\tprint(score)\n\t\tclf = GridSearchCV(RandomForestRegressor(), tuned_parameters, cv=5, scoring=score)\n\t\tclf.fit(X_train, y_train)\n\t\tprint(clf.best_estimator_)\n\t\tprint('each parameter combination is ')\n\t\tfor params, mean_score, scores in clf.grid_scores_:\n\t\t\tprint('{0:.3f} (+/-{1:.03f}) for {2}'.format(mean_score, scores.std()/2, params))\n\n\tprint('--------最优参数下的随机森林结果--------')\n\tfor train, test in cv:\n\t\tregRFBest = RandomForestRegressor(n_estimators=100, max_depth=10).fit(feature_scaled[train], target[train])\n\t\tprint('train score:{0:.3f}, test score:{1:.3f}\\n'.format(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[train], target[train]),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tregRFBest.score(feature_scaled[test], target[test])))\n\treturn regRFBest, feature_scaled, target",
"def train(self, test_vector):\n\t\twith open(self.PATH + '/src/data/train_emma.csv', 'rt') as f:\n\t\t\treader = csv.reader(f)\n\n\t\t\ttrain_data = dict()\n\t\t\ttrain_data_labels = list()\n\t\t\ttrain_data_list = []\n\t\t\ttrain_data_labels_list = []\n\n\t\t\tnext(reader, None)\n\t\t\tfor row in reader:\n\t\t\t\tfor idx in range(len(row)):\n\t\t\t\t\tif idx == 0:\n\t\t\t\t\t\ttrain_data['file'] = row[idx]\n\t\t\t\t\tif idx == 1:\n\t\t\t\t\t\ttrain_data['line'] = int(row[idx])\n\t\t\t\t\tif idx == 2:\n\t\t\t\t\t\ttrain_data['timestamp'] = row[idx]\n\t\t\t\t\tif idx == 3:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\t\t\t\t\tif idx == 4:\n\t\t\t\t\t\ttrain_data_labels.append(row[idx])\n\n\t\t\t\ttrain_data_list.append(train_data)\n\t\t\t\ttrain_data_labels_list.append(train_data_labels)\n\t\t\t\ttrain_data = dict()\n\t\t\t\ttrain_data_labels = list()\n\n\t\t\tC = 0.8\n\t\t\tdict_vectorizer = DictVectorizer(sparse=False)\n\t\t\ttrain_data_trasformed = dict_vectorizer.fit_transform(train_data_list)\n\t\t\ttest_vector_transformed = dict_vectorizer.transform(test_vector)\n\n\t\t\t# print(dict_vectorizer.get_feature_names())\n\t\t\t# print(dict_vectorizer.inverse_transform(train_data_trasformed))\n\n\t\t\t# print('Inverse transformation !!!')\n\t\t\t# print(test_vector)\n\t\t\t# inv_trans = dict_vectorizer.inverse_transform(test_vector_transformed)\n\n\t\t\t# fit LinearSVC\n\t\t\t# multi label binarizer to convert iterable of iterables into processing format\n\t\t\tmlb = MultiLabelBinarizer()\n\t\t\ty_enc = mlb.fit_transform(train_data_labels_list)\n\n\t\t\ttrain_vector = OneVsRestClassifier(svm.SVC(probability=True))\n\t\t\tclassifier_rbf = train_vector.fit(train_data_trasformed, y_enc)\n\n\t\t\t# test_vecc = cnt_vectorizer.fit_transform(X[:, 0])\n\t\t\t# # todo use pickle to persist\n\t\t\t# test_vector_reshaped = np.array(test_vector.ravel()).reshape((1, -1))\n\t\t\tprediction = classifier_rbf.predict(test_vector_transformed)\n\n\n\t\t\tprint(\"Predicted usernames: \\n\")\n\t\t\t# print(prediction)\n\t\t\t# print(mlb.inverse_transform(prediction))\n\n\t\t\tusers = self.parse_prediction(mlb.inverse_transform(prediction))\n\t\t\tprint(users)\n\t\t\treturn users",
"def setup_svm_classifier(training_data, y_training, testing_data, features, method=\"count\", ngrams=(1,1)):\n # generate x and y training data\n\n if method == \"count\":\n vec, x_training, x_testing = define_features_vectorizer(features, training_data, testing_data,ngramrange=ngrams)\n elif method == \"tfidf\":\n vec, x_training, x_testing = define_features_tfidf(features, training_data, testing_data,ngramrange=ngrams)\n else:\n print(\"Method has to be either count or tfidf\")\n return 1\n\n # train classifier\n\n model = SVMClassifier_scratch()\n model.fit(x_training, y_training)\n\n return model, vec, x_testing",
"def train(self):\n\t\traise NotImplementedError",
"def test_machine_learning():",
"def train():\n # YOUR TRAINING CODE GOES HERE",
"def test_stage_0():\n\tra_1 = readImage(TRAIN_RAW_IMAGE_1)\n\tre_1 = readImage(TRAIN_RESULT_IMAGE_1)\n\n\tra_2 = readImage(TRAIN_RAW_IMAGE_2)\n\tre_2 = readImage(TRAIN_RESULT_IMAGE_2)\n\n\t# Uncomment below if more examples are required.\n\t# ra_3 = readImage(TRAIN_RAW_IMAGE_3)\n\t# re_3 = readImage(TRAIN_RESULT_IMAGE_3)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_1),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_2 += (\n\t# \tlaplace_operator(TRAIN_RAW_IMAGE_2),\\\n\t# \t# k_means(TRAIN_RAW_IMAGE_2)[0],\\\n\t# \t)\n\n\t# The prediction model is obtained and trained.\n\tengine = get_model((ra_1, ra_2,), (re_1, re_2,), model_type=SVM, percentage=0.1)\n\n\ttest_percentage = float(1) # how many tests\n\n\tra_1 = readImage(TEST_RAW_IMAGE_1)\n\n\t# Uncomment below if the additional features are needed.\n\t# ra_1 += (\n\t# \tlaplace_operator(TEST_RAW_IMAGE_1),\\\n\t# \t# k_means(TEST_RAW_IMAGE_1)[0],\\\n\t# \t)\n\n\tre_1 = readImage(TEST_RESULT_IMAGE_1)\n\n\t# ra_2 = readImage(TEST_RAW_IMAGE_2)\n\t# re_2 = readImage(TEST_RESULT_IMAGE_2)\n\n\tinput_vec = []\n\t# The features are extracted.\n\tinput_vec += buildFeatureArray_2(ra_1[0], ra_1[1], ra_1[2],\\\n\t\tRADIUS_ARRAY,\\\n\t\tadditional_feats=([] if len(ra_1) == 3 else ra_1[3:]))\n\n\tex_no = int(test_percentage * len(input_vec)) # actual number of the test sample\n\n\toutput_vec = []\n\toutput_vec += matrixToArray(re_1[0], lambda el: 1 if el == 255 else 0)\n\n\tprint('Will start predicting...')\n\n\tpredicted_vec = engine.predict(input_vec[:ex_no])\n\n\tcounter = float(0)\n\tfor y, p in zip(output_vec[:ex_no], predicted_vec[:ex_no]):\n\t\tif y == p: counter += 1\n\n\tprint('Accuracy: ' + str(counter/ex_no))\n\n\tpredicted_mat = arrayToMatrix( predicted_vec, len(re_1[0]), len(re_1[0][0]),\\\n\t\tlambda el: 255 if el == 1 else 0)\n\n\t# The predicted segmentation is saved.\n\tsave_rgb_img(\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t np.array(predicted_mat).transpose(),\\\n\t 'pred.bmp',\\\n\t)",
"def train(self, features, labels, seed=None):\n raise NotImplementedError('Not implemented')",
"def train(self):\n feature = Feature(trained=False)\n classifier = LogisticRegression(\n penalty='l2',\n max_iter=100,\n solver='liblinear',\n random_state=self.RAND_SEED)\n\n true_labels = []\n predicted_labels = []\n\n for subj in self.subjects:\n print(subj)\n # preprocess training and testing set\n self.dataset_gen(subject=subj, valid=False)\n\n # train and predict\n pipeline_steps = [('vectorized', feature.vector)]\n if self.istfidf:\n pipeline_steps.append(('tf-idf', feature.tfidftransform))\n if self.islda == 'small':\n pipeline_steps.append(('lda', feature.ldatransform_small))\n elif self.islda == 'large':\n pipeline_steps.append(('lda', feature.ldatransform_large))\n else:\n pass\n if self.isnorm:\n pipeline_steps.append(('scalar', StandardScaler(with_mean=False)))\n pipeline_steps.append(('clf', classifier))\n model = Pipeline(steps=pipeline_steps)\n\n model.fit(self.X_train, self.y_train)\n\n predicted = model.predict(self.X_test)\n # hamming\n predicted_labels.append(predicted)\n true_labels.append(self.y_test)\n\n true_matrix, pred_matrix = np.array(true_labels, int).T, np.array(predicted_labels, int).T\n true_matrix[true_matrix == -1] = 0\n pred_matrix[pred_matrix == -1] = 0\n\n evaluation = Evaluation(self.subjects)\n evaluation.model_evaluate(true_matrix=true_matrix, pred_matrix=pred_matrix, model_name=self.modelname)",
"def walk_forward_cv(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n optimal_params_by_model = {}\r\n cv_metadata_by_model = {}\r\n cv_predictions_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.cv_params = self.cv_params\r\n svm.test_name = self.test_name\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.run_svm_cv()\r\n optimal_params_by_model['SVM'] = svm.svm_optimal_params\r\n cv_metadata_by_model['SVM'] = svm.metadata\r\n cv_predictions_by_model['SVM'] = svm.svm_cv_predictions\r\n \r\n self.optimal_params_by_output[output_name] = optimal_params_by_model\r\n self.cv_metadata_by_output[output_name] = cv_metadata_by_model\r\n self.cv_predictions_by_output[output_name] = cv_predictions_by_model",
"def learn1_svc():\n \n svc.fit(vector_training,sentiment_training) ##fit the training data of vector tweets and sentiments using LinearSVC\n correct = 0\n for i in range(vector_testing.shape[0]): ##using the testing data, see how accurate LinearSVC is\n prediction = svc.predict(vector_testing[i])\n sentiment = sentiment_testing[i]\n if prediction[0] == sentiment:\n correct +=1\n accuracy = correct/vector_testing.shape[0]\n print('Linear Support Vector Classifier Testing Accuracy: {:.2f}'.format(accuracy)) ##print the accuracy of the algorithm",
"def train_svm_model(self, X_train, X_test, y_train, y_test):\r\n clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),\r\n ('clf', LinearSVC())])\r\n clf = clf.fit(X_train, y_train)\r\n pred = clf.predict(X_test)\r\n print('Confusion matrix\\n',confusion_matrix(y_test,pred))\r\n print('Classification_report\\n',classification_report(y_test,pred))\r\n return clf",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self):\n pass",
"def train(self, trainingData, trainingLabels, validationData, validationLabels):\n from sklearn import svm\n \n \"*** YOUR CODE HERE ***\"\n self.sklearn_svm = svm.SVC(C=5, kernel='rbf', gamma=0.005, decision_function_shape='ovo')\n self.sklearn_svm.fit(trainingData, trainingLabels)",
"def train(self)->None:",
"def train(self):\n raise NotImplementedError",
"def train_func(sets,\n\t\t\t\t names=[\"Cake.lie\",\"Cake.lie1\",\"Cake.lie2\",\"Cake.lie3\",\"Cake.lie4\",\"Cake.lie5\",\"Cake.lie6\",\"Cake.lie7\"],\n\t\t\t\t adds=[\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\"],\n\t\t\t\t nfolds=10, cv = None,n_params=100,debug=True):\n\t\n\tret_preds = []\n\n\tmodel = SVR()\n\n\tparams = {\n\t 'C': uniform(0.01,300.0),\n\t 'epsilon': uniform(0.01,300.0),\n\t 'gamma' : [0.001,0.005,0.01,0.05,0.1,0.5,1.0,10.0,100.0],\n\t 'max_iter' : [100000000],\n\t 'tol' : [1e-8],\n\t 'kernel': [\"linear\",\"rbf\"]\n\t}\n\t\n\tif debug: print(\"Training SVM...\")\n\tmodel,preds = train_model(sets.drop([\"time\",\"IDENTIFIER\",\"system\"],axis=1, errors='ignore'),\n\t\t\t\t\t \t\t\t\t\t sets[\"time\"],params,model,\n\t\t\t\t\t \t\t\t\t\t scale=False,cv = cv,n_params=n_params)\n\tif debug: print(\"Done training SVM...\")\n\t\n\toutfile = open(\"preds/%s_SVM%s.txt\" % (names[3],adds[3]),\"w\")\n\tfor val in zip(list(sets[\"IDENTIFIER\"]),list(sets[\"time\"]),preds):\n\t\toutfile.write(\"%s,%s,%s\\n\" % val)\n\toutfile.close()\n\n\twith open('mods/%s_SVM%s.pickle' % (names[3],adds[3]), \"w\") as f: \n \t\tpickle.dump(model, f)\n\n\tret_preds.append(preds)\n\t\n\tmodel = SVR()\n\n\tparams = {\n\t 'C': uniform(0.01,300.0),\n\t 'epsilon': uniform(0.01,300.0),\n\t 'max_iter' : [100000000],\n\t 'tol' : [1e-8],\n\t 'kernel': [\"linear\"]\n\t}\n\t\n\tif debug: print(\"Training SVM...\")\n\tmodel,preds = train_model(sets.drop([\"time\",\"IDENTIFIER\",\"system\"],axis=1, errors='ignore'),\n\t\t\t\t\t \t\t\t\t\t sets[\"time\"],params,model,\n\t\t\t\t\t \t\t\t\t\t scale=False,cv = cv,n_params=n_params)\n\tif debug: print(\"Done training SVM...\")\n\t\n\toutfile = open(\"preds/%s_SVML%s.txt\" % (names[3],adds[3]),\"w\")\n\tfor val in zip(list(sets[\"IDENTIFIER\"]),list(sets[\"time\"]),preds):\n\t\toutfile.write(\"%s,%s,%s\\n\" % val)\n\toutfile.close()\n\n\twith open('mods/%s_SVML%s.pickle' % (names[3],adds[3]), \"w\") as f: \n \t\tpickle.dump(model, f)\n\n\tret_preds.append(preds)\n \n\tmodel = SVR()\n\n\tparams = {\n\t 'C': uniform(0.01,300.0),\n\t 'epsilon': uniform(0.01,300.0),\n 'gamma' : expon(scale=.1),\n\t 'max_iter' : [100000000],\n\t 'tol' : [1e-8],\n\t 'kernel': [\"rbf\"]\n\t}\n\t\n\tif debug: print(\"Training SVM...\")\n\tmodel,preds = train_model(sets.drop([\"time\",\"IDENTIFIER\",\"system\"],axis=1, errors='ignore'),\n\t\t\t\t\t \t\t\t\t\t sets[\"time\"],params,model,\n\t\t\t\t\t \t\t\t\t\t scale=False,cv = cv,n_params=n_params)\n\tif debug: print(\"Done training SVM...\")\n\t\n\toutfile = open(\"preds/%s_SVMRBF%s.txt\" % (names[3],adds[3]),\"w\")\n\tfor val in zip(list(sets[\"IDENTIFIER\"]),list(sets[\"time\"]),preds):\n\t\toutfile.write(\"%s,%s,%s\\n\" % val)\n\toutfile.close()\n\n\twith open('mods/%s_SVMRBF%s.pickle' % (names[3],adds[3]), \"w\") as f: \n \t\tpickle.dump(model, f)\n\n\tret_preds.append(preds)\n \n\tret_preds = pd.DataFrame(ret_preds).transpose()\n \n\tret_preds.columns = [\n\t\t\t\t \"%s_SVM_orig%s\" % (names[0],adds[0]),\n\t\t\t\t \"%s_SVML%s\" % (names[1],adds[1]),\n\t\t\t\t \"%s_SVMRBF%s\" % (names[2],adds[2])]\n\n\n\treturn(ret_preds)",
"def svm_clf_training(max_features, data):\r\n X_train, y_train, X_test, y_test = data\r\n clf = Pipeline([('feature_selection', SelectKBest(score_func=chi2, k=max_features)),\r\n ('clf', svm.SVC(C=1., kernel='linear'))])\r\n\r\n vectorizer = CountVectorizer(ngram_range=(1, 2), lowercase=True) # unigrams and bigrams\r\n X_matrix_tr = vectorizer.fit_transform(X_train)\r\n # parameters = [{'clf__kernel': ['linear'], 'clf__C': [0.1, 1, 10, 100]},\r\n # {'clf__kernel': ['rbf'], 'clf__C': [0.1, 1, 10, 100], 'clf__gamma': [0.001, 0.01, 0.1]},\r\n # {'clf__kernel': ['poly'], 'clf__C': [0.1, 1, 10, 100], 'clf__degree': [2, 3, 4, 5]}]\r\n # clf = GridSearchCV(svc, parameters, scoring='accuracy')\r\n clf.fit(X_matrix_tr, y_train)\r\n # print(\"Best parameters set found on development set:\")\r\n # print()\r\n # print(clf.best_estimator_)\r\n # print()\r\n # print(\"Grid scores on development set:\")\r\n # print()\r\n # for params, mean_score, scores in clf.grid_scores_:\r\n # print(\"%0.3f (+/-%0.03f) for %r\"\r\n # % (mean_score, scores.std() / 2, params))\r\n # print()\r\n voc = vectorizer.get_feature_names()\r\n # vectorizer1 = CountVectorizer(ngram_range=(1, 2), lowercase=True, vocabulary=voc)\r\n # X_matrix_val = vectorizer1.fit_transform(X_test)\r\n # y_pred = clf.predict(X_matrix_val)\r\n\r\n # for i in range(len(X_test)):\r\n # if y_test[i] != y_pred[i]:\r\n # print(X_test[i], y_test[i], y_pred[i])\r\n # print(classification_report(y_test, y_pred))\r\n return clf, voc",
"def train(self, ):\n raise NotImplementedError",
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)",
"def train(self, features, labels):\n self.train_features = features\n self.train_labels = labels\n #raise NotImplementedError",
"def call_features(self, inputs):\n result = self.embedding(inputs)\n inception = []\n for conv, pool, flat in zip(\n self.convolutions, self.pooling, self.flatten\n ):\n tmp = conv(result)\n tmp = pool(tmp)\n tmp = flat(tmp)\n inception.append(tmp)\n result = self.concat(inception)\n result = self.dense1(result)\n result = self.dropout1(result)\n result = self.dense2(result)\n return result",
"def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n import sklearn\n from sklearn import svm\n\n \"*** YOUR CODE HERE ***\"\n self.sklearn_classifier = svm.SVC(C=2, gamma=0.025, decision_function_shape='ovo', tol=0.015)\n self.sklearn_classifier.fit(trainingData, trainingLabels)",
"def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n self.n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #pre-processing data\n X = preprocessing.scale(np.hsplit(my_data,[self.n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[self.n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[self.n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVR(kernel='linear', C=1e3, cache_size=DEFAULT_CACHE_SIZE)\n #self.classifier = svm.SVR(kernel='rbf', C=1e3, gamma=0.1, cache_size=DEFAULT_CACHE_SIZE)\n self.classifier.fit(X, Y)",
"def train_SVM(data: np.array, labels: np.array)->None:\n print(\"SVM is not implemented yet!\")",
"def train(self, X, y):",
"def setup():\n # change working directory to\n os.chdir(ROOT_DIR)\n # move to dataFiles\n with misc.cd('dataFiles'):\n print('Now in:', os.getcwd())\n # Load in data\n model_test = models.MlModel('rf', 'water-energy.csv', 'expt')\n # Get feature. I use rdkit2d as it is fast to generate\n df, num_feat, feat_time = features.featurize(model_test.data, model_test.algorithm, [0])\n # Split the data\n train_features, test_features, train_target, test_target, feature_list = features.targets_features(df, 'expt')\n return train_features, test_features, train_target, test_target",
"def train_naive(): # add arguments as needed\n pass",
"def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))",
"def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0",
"def main(args):\n\n # load dataset\n with open(args.infile, 'rb') as fin:\n x_train, y_train, x_test, y_test = pickle.load(fin)\n\n y_train = y_train.astype('int64')\n y_test = y_test.astype('int64')\n\n random_index = list(range(len(x_train)))\n random.shuffle(random_index)\n x_train = np.array(x_train[random_index])\n y_train = np.array(y_train[random_index])\n\n # y_train = y_train.astype(bool).astype(int)\n # y_test = y_test.astype(bool).astype(int)\n\n # combined different features\n feature_extractors = [\n # ('general', MyScaler(False)),\n # ('wordcount', MyCountVectorizer(ngram_range=(1, 1), stop_words='english')),\n ('tfidf', MyTfidfVectorizer(stop_words='english')),\n ]\n combined_feature = FeatureUnion(feature_extractors)\n\n estimators = [('feature', combined_feature),\n ('clf', svm.LinearSVC(C=0.3))]\n pipeline = Pipeline(estimators)\n\n # pipeline.fit(x_train, y_train)\n # print(pipeline.score(x_test, y_test))\n\n # parameters to search\n param_grid = [\n {\n 'clf': [MultinomialNB()],\n 'clf__alpha': [10, 1.0, 0.1, 0.01],\n },\n {\n 'clf': [svm.LinearSVC()],\n 'clf__C': [3, 1, 0.3, 0.1],\n },\n ]\n\n # start training\n t0 = time.time()\n grid = GridSearchCV(pipeline, param_grid=param_grid, verbose=4, n_jobs=4)\n grid.fit(x_train, y_train)\n\n print()\n print('done in %.2f seconds' % (time.time() - t0))\n print()\n print('train accuracy: %.2f%%' % (100 * grid.score(x_train, y_train)))\n print('test accuracy: %.2f%%' % (100 * grid.score(x_test, y_test)))\n print()\n print('the best parameters are:', grid.best_params_)\n print()\n print('confusion matrix:')\n print(metrics.confusion_matrix(y_test, grid.predict(x_test)))",
"def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]",
"def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.LinearSVC(class_weight='auto',C=1.0)\n self.classifier.fit(X, Y)",
"def train(self, prob: SvmProblem):\n # Define variables\n self.prob = prob\n c = prob.C\n c2 = prob.gamma\n xkern = prob.xkernel\n xsk = prob.xskernel\n\n # Swap params, so SVM solves X* with correct params\n prob.C = c2\n prob.xkernel = xsk\n\n # Find SVM solution in X*\n svm = SVM()\n priv_clf = svm.train(prob.Xstar, prob)\n\n # Replace swapped out params so modified SVM solves X with correct params\n prob.C = c\n prob.xkernel = xkern\n\n # Get the 'frames of knowledge' - Get the kernel distance from each\n # privileged training data-point to the privileged support vectors\n frames = np.zeros((prob.num, len(priv_clf.support_vectors)))\n for i in range(prob.num):\n for j in range(len(priv_clf.support_vectors)):\n frames[i][j] = prob.xkernel((priv_clf.support_vectors[j]), prob.Xstar[i])\n\n # Form pairs so that each training point is matched against each 'frame of knowledge'\n training_pairs = np.zeros((prob.num, len(priv_clf.support_vectors)), dtype=object)\n for i in range(prob.num):\n for j in range(len(priv_clf.support_vectors)):\n training_pairs[i][j] = [prob.X[i], frames[i][j]]\n training_pairs = np.array(training_pairs)\n\n regr_pairs = np.zeros((len(priv_clf.support_vectors), prob.num), dtype=object)\n for i in range(prob.num):\n for j in range(len(priv_clf.support_vectors)):\n regr_pairs[j][i] = training_pairs[i][j]\n\n # Learn a regression based on above pairs\n self.models = []\n for dataSet in regr_pairs:\n regr = SVR(kernel='rbf')\n xs = []\n ys = []\n for i in range(prob.num):\n xs.append(dataSet[i][0].flatten())\n ys.append(dataSet[i][1])\n xs = np.array(xs)\n ys = np.array(ys)\n self.models.append(regr.fit(xs, ys))\n\n # Transform data from X using learned regression\n new_xs = []\n new_ys = []\n for i in range(prob.num):\n new_xs.append(self.transform(prob.X[i].reshape(1, -1)).flatten())\n new_ys.append(priv_clf.predict(prob.Xstar[i]))\n new_x = np.asarray(new_xs)\n new_y = np.array(new_ys)\n\n # Form a new problem and learn an SVMd+ solution for it\n new_prob = SvmProblem(new_x, prob.Xstar, new_y)\n new_svm = SVMdp()\n self.clf = new_svm.train(new_prob)",
"def train(self):\n raise NotImplementedError()",
"def features_selection(x_train, y_train,x_val,x_test,model,feature_list):\n n_features = x_train.shape[1]\n print(\"n_features original: \",n_features)\n if model == 'LR':\n estimator = LogisticRegression(random_state = 442, penalty = 'elasticnet', solver= 'saga',l1_ratio=0.5)\n if model == 'SVM':\n estimator = svm.LinearSVC(class_weight = 'balanced', random_state = 442)\n if model == 'SGD':\n estimator = SGDClassifier(class_weight = 'balanced', random_state = 442)\n if model == 'ADA':\n estimator = AdaBoostClassifier(DecisionTreeClassifier(max_depth=5, class_weight = 'balanced'),random_state = 442)\n if model == 'RF':\n estimator = RandomForestClassifier(random_state=442, class_weight = 'balanced')\n if model == 'GBT':\n estimator = GradientBoostingClassifier(random_state = 442)\n if model == 'XGBT':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = XGBClassifier(seed = 442,eval_metric = 'auc', scale_pos_weight = ratio)\n if model == 'LightGB':\n ratio = float(np.sum(y_train == 0)) / np.sum(y_train==1)\n estimator = lgb.LGBMClassifier(seed = 442, scale_pos_weight = ratio)\n\n print(\"Searching RFE\")\n classifier = RFE(estimator=estimator, step=1)\n model = Pipeline([('classifier', classifier)])\n parameters = {'classifier__n_features_to_select': [int(n_features*0.25),int(n_features*0.5),int(n_features*0.75),n_features]}\n grid = GridSearchCV(model, parameters, cv=3)\n grid.fit(x_train, y_train)\n num_features = grid.best_params_\n num_features = re.sub(r'[^\\d]','',str(num_features))\n print(\"Optimal number of features\",num_features)\n\n print(\"SelectKBest\")\n selector = SelectKBest(f_classif, k=int(num_features)) #we pass the \"optimal number of features\" discovered in the previous pass\n selector.fit(x_train, y_train)\n x_train = selector.transform(x_train).astype('float32')\n x_val = selector.transform(x_val).astype('float32')\n x_test = selector.transform(x_test).astype('float32')\n feature_list = [feature_list[i] for i in selector.get_support(indices=True)]\n return x_train, x_val, x_test,feature_list, num_features",
"def main():\n \n # The following 5 command lines can be outcommented if the features are already created.\n # There is no need to process the data every single time.\n # Fine tuning the learning algorythm is much faster without that extra step.\n \n # by reading the train dataset the feature index is created.\n # First calling of the processdata function\n # Data limited to 300000\n featureIndexes = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000)\n print \"featureIndex generated!\"\n print len(featureIndexes)\n\n # Trainfeature is created using the indexfeatures...\n # Second calling of the processdata function\n trainFeatures, trainTargets, trainItemIds, trainPrices, trainUrls, trainPhones, trainEmails, trainLength = processData(os.path.join(dataFolder,\"avito_train.tsv\"), itemsLimit=600000) # Original itemsLimit=300000\n\n # Building the test dataset... just like the training...\n testFeatures, testItemIds, testPrices, testUrls, testPhones, testEmails, testLength = processData(os.path.join(dataFolder,\"avito_test.tsv\"), featureIndexes)\n\n # Dumping data into file...\n # joblib.dump((trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds), os.path.join(dataFolder,\"train_data.pkl\"))\n joblib.dump((trainFeatures,trainTargets,trainItemIds,trainPrices,trainUrls,trainPhones,trainEmails,trainLength,\n testFeatures, testItemIds,testPrices,testUrls,testPhones,testEmails,testLength), os.path.join(dataFolder,\"SeparatedByCategory.pkl\"))\n\n\n # loading data pack...\n # trainFeatures, trainTargets, trainItemIds, testFeatures, testItemIds = joblib.load(os.path.join(dataFolder,\"train_data.pkl\"))\n\n #logging.info(\"Feature preparation done, fitting model...\")\n\n # Stochastic gradient model",
"def train(self):\n return",
"def MachineClassifier(options, args):\n\n # Check for setup file in array args:\n if (len(args) >= 1) or (options.configfile):\n if args: config = args[0]\n elif options.configfile: config = options.configfile\n print swap.doubledashedline\n print swap.ML_hello\n print swap.doubledashedline\n print \"ML: taking instructions from\",config\n else:\n print MachineClassifier.__doc__\n return\n\n tonights = swap.Configuration(config)\n \n # Read the pickled random state file\n random_file = open(tonights.parameters['random_file'],\"r\");\n random_state = cPickle.load(random_file);\n random_file.close();\n np.random.set_state(random_state);\n\n\n time = tonights.parameters['start']\n print time\n\n # Get the machine threshold (make retirement decisions)\n threshold = tonights.parameters['machine_threshold']\n prior = tonights.parameters['prior']\n\n # Get list of evaluation metrics and criteria \n eval_metrics = tonights.parameters['evaluation_metrics']\n \n # How much cross-validation should we do? \n cv = tonights.parameters['cross_validation']\n\n survey = tonights.parameters['survey']\n\n #----------------------------------------------------------------------\n # read in the metadata for all subjects (Test or Training sample?)\n storage = swap.read_pickle(tonights.parameters['metadatafile'], 'metadata')\n subjects = storage.subjects\n\n #----------------------------------------------------------------------\n # read in the SWAP collection\n sample = swap.read_pickle(tonights.parameters['samplefile'],'collection')\n\n #----------------------------------------------------------------------\n # read in or create the ML collection\n MLsample = swap.read_pickle(tonights.parameters['MLsamplefile'],\n 'MLcollection')\n\n # read in or create the ML bureau for machine agents (history)\n MLbureau = swap.read_pickle(tonights.parameters['MLbureaufile'],'bureau')\n #if not tonights.parameters['MLbureaufile']:\n # MLbureaufile = swap.get_new_filename(tonights.parameters,'bureau','ML')\n # tonights.parameters['MLbureaufile'] = MLbureaufile\n\n #MLbureau = swap.read_pickle(tonights.parameters['MLbureaufile'],'bureau')\n\n\n #----------------------------------------------------------------------- \n # SELECT TRAINING & VALIDATION SAMPLES \n #-----------------------------------------------------------------------\n # TO DO: training sample should only select those which are NOT part of \n # validation sample (Nair catalog objects) 2/22/16\n\n train_sample = storage.fetch_subsample(sample_type='train',\n class_label='GZ2_label')\n \"\"\" Notes about the training sample:\n # this will select only those which have my morphology measured for them\n # AND which have a true \"answer\" according to GZ2\n # Eventually we could open this up to include the ~10k that aren't in the \n # GZ Main Sample but I think, for now, we should reduce ourselves to this\n # stricter sample so that we always have back-up \"truth\" for each galaxy.\n \"\"\"\n\n try:\n train_meta, train_features = ml.extract_features(train_sample)\n original_length = len(train_meta)\n\n except TypeError:\n print \"ML: can't extract features from subsample.\"\n print \"ML: Exiting MachineClassifier.py\"\n sys.exit()\n\n else:\n # TODO: consider making this part of SWAP's duties? 
\n # 5/18/16: Only use those subjects which are no longer on the prior\n off_the_fence = np.where(train_meta['SWAP_prob']!=prior)\n train_meta = train_meta[off_the_fence]\n train_features = train_features[off_the_fence]\n train_labels = np.array([1 if p > prior else 0 for p in \n train_meta['SWAP_prob']])\n\n #train_labels = train_meta['Nair_label'].filled()\n\n shortened_length = len(train_meta)\n print \"ML: found a training sample of %i subjects\"%shortened_length\n removed = original_length - shortened_length\n print \"ML: %i subjects had prior probability and were removed\"%removed\n \n\n valid_sample = storage.fetch_subsample(sample_type='valid',\n class_label='Expert_label')\n try:\n valid_meta, valid_features = ml.extract_features(valid_sample)\n except:\n print \"ML: there are no subjects with the label 'valid'!\"\n else:\n valid_labels = valid_meta['Expert_label'].filled()\n print \"ML: found a validation sample of %i subjects\"%len(valid_meta)\n\n # ---------------------------------------------------------------------\n # Require a minimum size training sample [Be reasonable, my good man!]\n # ---------------------------------------------------------------------\n if len(train_sample) < 10000: \n print \"ML: training sample is too small to be worth anything.\"\n print \"ML: Exiting MachineClassifier.py\"\n sys.exit()\n \n else:\n print \"ML: training sample is large enough to give it a shot.\"\n\n # TODO: LOOP THROUGH DIFFERENT MACHINES? \n # 5/12/16 -- no... need to make THIS a class and create multiple \n # instances? Each one can be passed an instance of a machine?\n\n # Machine can be trained to maximize/minimize different metrics\n # (ACC, completeness, purity, etc. Have a list of acceptable ones.)\n # Minimize a Loss function (KNC doesn't have a loss fcn). \n for metric in eval_metrics:\n \n # REGISTER Machine Classifier\n # Construct machine name --> Machine+Metric? For now: KNC\n machine = 'KNC'\n machine = 'RF'\n Name = machine+'_'+metric\n \n # register an Agent for this Machine\n # This \"Agent\" doesn't behave like a SW agent... at least not yet\n\n try: \n test = MLbureau.member[Name]\n except: \n MLbureau.member[Name] = swap.Agent_ML(Name, metric)\n \n MLagent = MLbureau.member[Name]\n\n #--------------------------------------------------------------- \n # TRAIN THE MACHINE; EVALUATE ON VALIDATION SAMPLE\n #---------------------------------------------------------------\n\n # Now we run the machine -- need cross validation on whatever size \n # training sample we have .. \n \n # Fixed until we build in other machine options\n # Need to dynamically determine appropriate parameters...\n\n #max_neighbors = get_max_neighbors(train_features, cv)\n #n_neighbors = np.arange(1, (cv-1)*max_neighbors/cv, 5, dtype=int)\n #params = {'n_neighbors':n_neighbors, \n # 'weights':('uniform','distance')}\n\n num_features = train_features.shape[1]\n \n min_features = int(round(np.sqrt(num_features)))\n params = {'max_features':np.arange(min_features, num_features+1),\n 'max_depth':np.arange(2,16)}\n\n # Create the model \n # for \"estimator=XXX\" all you need is an instance of a machine -- \n # any scikit-learn machine will do. However, non-sklearn machines..\n # That will be a bit trickier! (i.e. 
Phil's conv-nets)\n general_model = GridSearchCV(estimator=RF(n_estimators=30), \n param_grid=params, n_jobs=-1,\n error_score=0, scoring=metric, cv=cv) \n \n # Train the model -- k-fold cross validation is embedded\n print \"ML: Searching the hyperparameter space for values that \"\\\n \"optimize the %s.\"%metric\n trained_model = general_model.fit(train_features, train_labels)\n\n MLagent.model = trained_model\n\n # Test \"accuracy\" (metric of choice) on validation sample\n score = trained_model.score(valid_features, valid_labels)\n\n ratio = np.sum(train_labels==1) / len(train_labels)\n\n MLagent.record_training(model_described_by=\n trained_model.best_estimator_, \n with_params=trained_model.best_params_, \n trained_on=len(train_features), \n with_ratio=ratio,\n at_time=time, \n with_train_score=trained_model.best_score_,\n and_valid_score=trained_model.score(\n valid_features, valid_labels))\n\n fps, tps, thresh = mtrx.roc_curve(valid_labels, \n trained_model.predict_proba(valid_features)[:,1])\n\n metric_list = compute_binary_metrics(fps, tps)\n ACC, TPR, FPR, FNR, TNR, PPV, FDR, FOR, NPV = metric_list\n \n MLagent.record_validation(accuracy=ACC, recall=TPR, precision=PPV,\n false_pos=FPR, completeness_f=TNR,\n contamination_f=NPV)\n \n #MLagent.plot_ROC()\n\n # ---------------------------------------------------------------\n # IF TRAINED MACHINE PREDICTS WELL ON VALIDATION ....\n # ---------------------------------------------------------------\n if MLagent.is_trained(metric):\n print \"ML: %s has successfully trained and will be applied \"\\\n \"to the test sample.\"\n\n # Retrieve the test sample \n test_sample = storage.fetch_subsample(sample_type='test',\n class_label='GZ2_label')\n \"\"\" Notes on test sample:\n The test sample will, in real life, be those subjects for which\n we don't have an answer a priori. However, for now, this sample\n is how we will judge, in part, the performance of the overall\n method. 
As such, we only include those subjects which have \n GZ2 labels in the Main Sample.\n \"\"\"\n\n try:\n test_meta, test_features = ml.extract_features(test_sample)\n except:\n print \"ML: there are no subjects with the label 'test'!\"\n print \"ML: which means there's nothing more to do!\"\n else:\n print \"ML: found test sample of %i subjects\"%len(test_meta)\n\n #----------------------------------------------------------- \n # APPLY MACHINE TO TEST SAMPLE\n #----------------------------------------------------------- \n predictions = MLagent.model.predict(test_features)\n probabilities = MLagent.model.predict_proba(test_features)\n\n print \"ML: %s has finished predicting labels for the test \"\\\n \"sample.\"%Name\n print \"ML: Generating performance report on the test sample:\"\n\n test_labels = test_meta['GZ2_label'].filled()\n print mtrx.classification_report(test_labels, predictions)\n\n test_accuracy=mtrx.accuracy_score(test_labels,predictions)\n test_precision=mtrx.precision_score(test_labels,predictions)\n test_recall=mtrx.recall_score(test_labels,predictions)\n\n MLagent.record_evaluation(accuracy_score=test_accuracy,\n precision_score=test_precision,\n recall_score=test_recall,\n at_time=time)\n #pdb.set_trace()\n \n # ----------------------------------------------------------\n # Save the predictions and probabilities to a new pickle\n\n test_meta['predictions'] = predictions\n test_meta['probability_of_smooth'] = probabilities[:,1]\n \n filename=tonights.parameters['trunk']+'_'+Name+'.pickle'\n swap.write_pickle(test_meta, filename)\n\n\n\n \"\"\"\n for thing, pred, p in zip(test_meta, predictions,\n probabitilies):\n \n # IF MACHINE P >= THRESHOLD, INSERT INTO SWAP COLLECTION\n # --------------------------------------------------------\n if (p >= threshold) or (1-p >= threshold):\n print \"BOOM! 
WE'VE GOT A MACHINE-CLASSIFIED SUBJECT:\"\n print \"Probability:\", p\n # Initialize the subject in SWAP Collection\n ID = thing['asset_id']\n sample.member[ID] = swap.Subject(ID, str(s['SDSS_id']), \n location=s['external_ref']) \n sample.member[ID].retiredby = 'machine'\n \n # Flag subject as 'INACTIVE' / 'DETECTED' / 'REJECTED'\n # ----------------------------------------------------------\n if p >= threshold:\n sample.member[str(s['id'])].state = 'inactive'\n elif 1-p >= threshold:\n sample.member[str(s['id'])].status = 'rejected' \n\n #\"\"\"\n \n \n # If is hasn't been done already, save the current directory\n # ---------------------------------------------------------------------\n tonights.parameters['dir'] = os.getcwd()+'/'+tonights.parameters['trunk']\n \n if not os.path.exists(tonights.parameters['dir']):\n os.makedirs(tonights.parameters['dir'])\n\n\n # Repickle all the shits\n # -----------------------------------------------------------------------\n if tonights.parameters['repickle']:\n\n new_samplefile = swap.get_new_filename(tonights.parameters,'collection')\n print \"ML: saving SWAP subjects to \"+new_samplefile\n swap.write_pickle(sample, new_samplefile)\n tonights.parameters['samplefile'] = new_samplefile\n \n new_samplefile=swap.get_new_filename(tonights.parameters,'MLcollection')\n print \"ML: saving test sample subjects to \"+new_samplefile\n swap.write_pickle(MLsample,new_samplefile)\n tonights.parameters['MLsamplefile'] = new_samplefile\n\n new_bureaufile=swap.get_new_filename(tonights.parameters,'bureau','ML')\n print \"ML: saving MLbureau to \"+new_bureaufile\n swap.write_pickle(MLbureau, new_bureaufile)\n tonights.parameters['MLbureaufile'] = new_bureaufile\n\n metadatafile = swap.get_new_filename(tonights.parameters,'metadata')\n print \"ML: saving metadata to \"+metadatafile\n swap.write_pickle(storage, metadatafile)\n tonights.parameters['metadatafile'] = metadatafile\n\n\n # UPDATE CONFIG FILE with pickle filenames, dir/trunk, and (maybe) new day\n # ----------------------------------------------------------------------\n configfile = config.replace('startup','update')\n\n # Random_file needs updating, else we always start from the same random\n # state when update.config is reread!\n random_file = open(tonights.parameters['random_file'],\"w\");\n random_state = np.random.get_state();\n cPickle.dump(random_state,random_file);\n random_file.close();\n swap.write_config(configfile, tonights.parameters)\n\n return",
"def train(self, training_steps=10):",
"def train_all(X_train_fuse, Y_train, X_dev_fuse, Y_dev, R_train, R_dev, hyperparams):",
"def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)",
"def forward(self, inp):\n out = self.features(inp)\n out = out.view(out.size(0), -1) # linearized the output of the module 'features'\n out = self.classifier(out)\n return out",
"def learn(self, Xtrain, ytrain):",
"def main(result_dir: str, data_atlas_dir: str, data_train_dir: str, data_test_dir: str):\n\n # load atlas images\n putil.load_atlas_images(data_atlas_dir)\n\n print('-' * 5, 'Training...')\n\n # load feature matrix and label vector\n # precomputed by preprocessAndStore.py\n file_id = open('data_train.pckl', 'rb')\n data_train = pickle.load(file_id)\n file_id.close()\n\n file_id = open('labels_train.pckl', 'rb')\n labels_train = pickle.load(file_id)\n file_id.close()\n\n\n ##########################################\n\n # perform a grid search over the parameter grid and choose the optimal parameters\n param_grid = {'C': [0.5, 1, 2.5, 50, 1000]} # grid to search for best parameter C = 0.02\n log_reg_classifier = model_selection.GridSearchCV(sk.LogisticRegression(class_weight='balanced')\n , param_grid, refit=True)\n\n print('abschnitt 1')\n\n data_train_scaled, scaler = util.scale_features(data_train)\n\n start_time = timeit.default_timer()\n\n log_reg_classifier.fit(data_train_scaled, labels_train)\n\n util.print_feature_importance(log_reg_classifier.best_estimator_.coef_)\n\n util.print_class_count(labels_train)\n\n print('abschnitt 2')\n\n #print(\"importance of features: \", log_reg_classifier.best_estimator_.coef_)\n print(\"best estimator: \", log_reg_classifier.best_estimator_)\n print(\"best parameter: \", log_reg_classifier.best_params_)\n\n\n # store trained log_regr\n file_id = open('log_regr.pckl', 'wb')\n pickle.dump(log_reg_classifier, file_id)\n file_id.close()\n file_id = open('scaler.pckl', 'wb')\n pickle.dump(scaler, file_id)\n file_id.close()\n\n print(' Time elapsed:', timeit.default_timer() - start_time, 's')",
"def evaluate(self, features, labels):\n raise NotImplementedError('Not implemented')",
"def train(parameters):\n X_train, X_val, X_test, y_train, y_val, y_test = get_data(parameters.feature)\n accuracy = -1\n if parameters.model == 'naive_bayes':\n accuracy, confusion_matrix = train_model(naive_bayes.MultinomialNB(), X_train, y_train, X_test, y_test)\n elif parameters.model == 'random_forest':\n accuracy = 0\n confusion_matrix = None\n for _ in range(10):\n accu, matrix = train_model(ensemble.RandomForestClassifier(n_estimators=40), X_train, y_train, X_test, y_test)\n if accu > accuracy:\n accuracy = accu\n confusion_matrix = matrix\n elif parameters.model == 'SVM':\n accuracy = train_model(svm.SVC(gamma='auto'), X_train, y_train, X_test, y_test)\n\n if accuracy > 0:\n print(\"%s, %s: %f\" % (parameters.model, parameters.feature, accuracy))\n print(confusion_matrix)",
"def main():\n\n if len(sys.argv) < 4 or len(sys.argv) > 5:\n print 'Usage: classifier.py data_dimension train_set_path test_set_path [option: add_bias]'; \n return; \n\n # create sets of possible hyperparameter values\n setC = {0.001, 0.01, 0.1, 1, 10, 25, 100}; # trade off regularizer and error minimization\n setRho = {0.001, 0.01, 0.1, 1}; # learning rate for gradient descent \n hyperparams = [setC, setRho];\n \n # create svm classifier for selected data\n dataDim = int(sys.argv[1]);\n trainPath = str(sys.argv[2]);\n testPath = str(sys.argv[3]);\n if len(sys.argv) == 5:\n c = Classifier('svm', hyperparams, dataDim, testPath, trainPath, addBias=True);\n else:\n c = Classifier('svm', hyperparams, dataDim, testPath, trainPath);\n \n print 'Classifier type: ', c.type, \\\n '\\nTraining set: ', trainPath, \\\n '\\nTest set: ', testPath;\n \n print 'Determining hyperparameters to use...';\n c.learnHyperparams(report=1);\n \n print 'Training classifier...';\n c.train();\n \n print 'Performing inference on test set...';\n c.test(); \n \n print '\\nREPORT:', \\\n '\\nUsing hyperparameters: ', c.theta, \\\n '\\nLearned weight vector: ', c.w, \\\n '\\nPrediction accuracy on test set: ', c.accuracy * 100, ' percent';",
"def forward(self, x):\n out = self.features(x)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out",
"def forward(self, x):\n out = self.features(x)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out",
"def compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, \n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn",
"def train(self) -> Any:\n pass",
"def train(self, *args, **kwargs):\n raise NotImplementedError",
"def __init__(self, foundVariables, trainingData, trainingClasses, trainingWeights, testingData, testingClasses):\n self.clf = svm.SVC(probability=True)\n self.foundVariables = foundVariables\n self.trainingData = trainingData\n self.trainingClasses = trainingClasses\n self.testingData = testingData\n self.testingClasses = testingClasses\n self.trainingWeights = trainingWeights",
"def test_vector_class():\n points = 10\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, points)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = np.ones(points) * .13\n bsm = BSmodel(sigma, data)\n\n print(bsm.premium())\n\n weights = np.ones(points) * .63\n means = np.vstack([np.ones(points) * -.01, np.ones(points) * .09])\n stds = np.vstack([np.ones(points) * .16, np.ones(points) * .05])\n param = np.vstack([weights, means, stds])\n mbs = MBSmodel(param, data)\n\n print(mbs.premium())\n\n param_a, param_p = np.ones(points) * 4.5, np.ones(points) * 2\n param_c = -.05 * np.ones(points)\n gb2 = GB2model([param_a, param_p, param_c], data)\n\n print(gb2.premium())",
"def train_svm(data: np.ndarray, test_labels: list, test_samples: list, train_labels: list,\n train_samples: list) -> np.ndarray:\n model = svm.SVC(kernel=\"rbf\", C=1024, gamma=2)\n model.fit(get_data_by_indexes(train_samples, data), train_labels)\n prediction = model.predict(get_data_by_indexes(test_samples, data))\n print(\"SVM fitness score {0:5.2f}%\".format(\n model.score(get_data_by_indexes(test_samples, data), test_labels) * float(100)))\n return prediction",
"def test_multimodel_feature_extraction():\n # set up parameters\n testcol = testcol_multi\n exp_id = 'validation1'\n\n params = {}\n\n model1_params = {'func': model.mnist_tfutils}\n model2_params = {'func': model.mnist_tfutils}\n model_params = [model1_params, model2_params]\n num_models = len(model_params)\n\n params['model_params'] = model_params\n\n params['load_params'] = {'host': testhost,\n 'port': testport,\n 'dbname': testdbname,\n 'collname': testcol,\n 'exp_id': 'training0'}\n\n params['save_params'] = {'exp_id': exp_id,\n 'save_intermediate_freq': 1,\n 'save_to_gfs': ['features', 'more_features']}\n\n targdict1 = {'func': get_extraction_target,\n 'to_extract': {'features': 'model_0/validation/valid1/hidden1/output:0',\n 'more_features': 'model_0/validation/valid1/hidden2/output:0'}}\n\n targdict2 = {'func': get_extraction_target,\n 'to_extract': {'features': 'model_1/validation/valid1/hidden1/output:0',\n 'more_features': 'model_1/validation/valid1/hidden2/output:0'}}\n\n targdict1.update(base.DEFAULT_LOSS_PARAMS)\n targdict2.update(base.DEFAULT_LOSS_PARAMS)\n\n validation_params1 = {'valid1': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'targets': targdict1,\n 'num_steps': 10,\n 'online_agg_func': utils.reduce_mean_dict}}\n\n validation_params2 = {'valid1': {'data_params': {'func': data.MNIST,\n 'batch_size': 100,\n 'group': 'test',\n 'n_threads': 4},\n 'queue_params': {'queue_type': 'fifo',\n 'batch_size': 100},\n 'targets': targdict2,\n 'num_steps': 10,\n 'online_agg_func': utils.reduce_mean_dict}}\n\n params['validation_params'] = [validation_params1, validation_params2]\n params['skip_check'] = True\n\n conn = pm.MongoClient(host=testhost,\n port=testport)\n for i in range(num_models):\n valid_exp_id = 'validation0_model_{}'.format(i)\n conn[testdbname][testcol + '.files'].delete_many({'exp_id': valid_exp_id})\n\n # actually run the feature extraction\n base.test_from_params(**params)\n\n # check that things are as expected.\n coll = conn[testdbname][testcol + '.files']\n\n for i in range(num_models):\n exp_id = 'validation1_model_{}'.format(i)\n assert coll.find({'exp_id': exp_id}).count() == 11\n\n # ... load the containing the final \"aggregate\" result after all features have been extracted\n q = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': True}}\n assert coll.find(q).count() == 1\n r = coll.find(q)[0]\n # ... check that the record is well-formed\n asserts_for_record(r, params, train=False)\n\n # ... check that the correct \"intermediate results\" (the actual features extracted) records exist\n # and are correctly referenced.\n q1 = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': False}}\n ids = coll.find(q1).distinct('_id')\n assert r['validation_results']['valid1']['intermediate_steps'] == ids\n\n # ... 
actually load feature batch 3\n idval = r['validation_results']['valid1']['intermediate_steps'][3]\n fn = coll.find({'item_for': idval})[0]['filename']\n fs = gridfs.GridFS(coll.database, testcol)\n fh = fs.get_last_version(fn)\n saved_data = cPickle.loads(fh.read())\n fh.close()\n first_results = saved_data['validation_results']['valid1']\n assert 'features' in first_results and 'more_features' in first_results\n features = saved_data['validation_results']['valid1']['features']\n more_features = saved_data['validation_results']['valid1']['more_features']\n assert features.shape == (100, 128)\n assert features.dtype == np.float32\n assert more_features.shape == (100, 32)\n assert more_features.dtype == np.float32",
"def test_training(self):\n\t\tpass",
"def svm_train(X, y, b, alpha, n_samples, n_features, learner, loop, eta,\n max_iter=100, step_probability=0.5):\n from pysofia import _sofia_ml\n if isinstance(X, six.string_types):\n if n_features is None:\n # the default in sofia-ml TODO: parse file to see\n n_features = 2**17\n w = _sofia_ml.train(X, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n elif isinstance(X, np.ndarray):\n if n_features is None:\n n_features = X.shape[1]\n\n if n_samples is None:\n n_samples = X.shape[0]\n\n w = _sofia_ml.train_fast(np.float64(X), np.float64(y), n_samples,\n n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n else:\n if n_features is None:\n n_features = X.shape[1]\n\n with tempfile.NamedTemporaryFile() as f:\n datasets.dump_svmlight_file(X, y, f.name, query_id=b)\n w = _sofia_ml.train(f.name, n_features, alpha, max_iter, False,\n learner.value, loop.value, eta.value,\n step_probability)\n return w",
"def test_svm_quantique():\n backend = BasicAer.get_backend('statevector_simulator')\n random_seed = 10598\n\n quantum_instance = QuantumInstance(backend, seed=random_seed, seed_transpiler=random_seed)\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n classical_kernel_estimation(samp_train, samp_test, labels)\n classical_kernel_estimation(samp_train_me, samp_test_me, labels_me)\n\n # Generate the feature map\n feature_map = FirstOrderExpansion(feature_dimension=2, depth=2)\n\n # Run the Quantum Kernel Estimator and classify the test data\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the FirstOrder feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])\n\n # Generate the feature map\n feature_map = SecondOrderExpansion(feature_dimension=2, depth=2)\n\n # Run the Quantum Kernel Estimator and classify the test data\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the SecondOrder feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])\n\n # Last implementation using the custom circuit generator\n print(\"Success for my implementation (second order):\")\n my_impl(samp_train, samp_test, labels)\n my_impl(samp_train_me, samp_test_me, labels_me)\n\n feature_map = CustomExpansion(num_qubits=2, constructor_function=custom_constr, feature_param=[1])\n\n qsvm = QSVM(feature_map=feature_map, training_dataset=samp_train,\n test_dataset=samp_test)\n qsvm_me = QSVM(feature_map=feature_map, training_dataset=samp_train_me,\n test_dataset=samp_test_me)\n\n result = qsvm.run(quantum_instance)\n result_me = qsvm_me.run(quantum_instance)\n print(\"Success of the Custom feature map kernel:\")\n print(result['testing_accuracy'])\n print(result_me['testing_accuracy'])",
"def executeFeatures(dfIn, train = True):\n\n if train == True:\n dfOut = dfIn['TARGET'] #update this with numerical columns that don't need cleaning\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = cleanNames(dfOut)\n dfOut = createPolyFeatures(dfOut)\n else:\n dfOut = dfIn['SK_ID_CURR'] ## tags from test set\n dfOut = standardizedIncome(dfIn, dfOut)\n dfOut = engineerDays(dfIn, dfOut)\n dfOut = createEncoders(dfIn, dfOut)\n dfOut = simplifyEducation(dfIn, dfOut)\n dfOut = simplifyFamily(dfIn, dfOut)\n dfOut = simplifyIncome(dfIn, dfOut)\n dfOut = addExtSources(dfIn, dfOut)\n dfOut = dfOut.drop('CODE_GENDER', axis = 1) ## Need to fix this\n #print(dfOut.columns)\n dfOut = cleanNamesTest(dfOut)\n dfOut = createPolyFeatures(dfOut)\n\n return dfOut",
"def __feature_change(train_x, train_y, val_x, test_x, processors):\n if processors:\n for processor in processors:\n train_x, val_x, test_x = processor(train_x, train_y, val_x, test_x)\n return train_x, val_x, test_x",
"def evaluate(self):\n # Method variables definition\n X_train, X_test, y_train, y_test = dm.reshape_y_set_split_data(self.datasetManager)\n featureScaleDependentVariables = self.datasetManager.params.featureScaleDependentVariables\n\n # Feature Scaling\n X_scaler, X_train = dm.do_feature_scaling(X_train)\n if featureScaleDependentVariables:\n y_scaler, y_train = dm.do_feature_scaling(y_train)\n else:\n y_scaler = None\n y_train = self.datasetManager.y_train\n \n self.X_scaler = X_scaler\n self.y_scaler = y_scaler\n\n # Training the SVR model on the training set\n regressor = SVR(kernel = 'rbf')\n regressor.fit(X_train, y_train.ravel())\n self.regressor = regressor\n\n # Predicting the Test set results\n self.y_pred = y_scaler.inverse_transform(regressor.predict(X_scaler.transform(X_test))) if featureScaleDependentVariables else regressor.predict(X_test)\n \n # Returning the process result : the regression type and the predicted dependent variables set\n return [\"Support Vector Regression\", self.get_r2_score(y_test, self.y_pred)]",
"def run(self):\n error(\"{} needs vector information.\".format(self.get_full_name()), not self.inputs.has_vectors())\n\n if self.is_supervised:\n error(\"{} is supervised and needs an input bundle list.\".format(self.get_full_name()),\n len(self.inputs) <= 1)\n error(\"{} got a {}-long bundle list, but a length of at most 2 is required.\"\n .format(self.get_full_name(), len(self.inputs)),\n len(self.inputs) > 2)\n # verify that the input name is of the vectors source bundle\n vectors_bundle_index = [\n i for i in range(len(self.inputs))\n if self.inputs.get(i).has_vectors()\n ][0]\n expected_name = self.inputs.get(\n vectors_bundle_index).get_source_name()\n if self.input_name != expected_name:\n error(\"Supervised transform was configured to name {} but the vector bundle {} is encountered at runtime.\"\n .format(self.input_name, expected_name))\n\n self.input_vectors = self.inputs.get(vectors_bundle_index).get_vectors().instances\n self.train_epi = self.inputs.get(vectors_bundle_index).get_indices().elements_per_instance\n error(f\"{self.get_full_name()} is supervised and needs label information.\", not self.inputs.has_labels())\n self.train_labels = self.inputs.get_labels(enforce_single=True, roles=roles.train)\n else:\n error(\"{} is not supervised but got an input bundle list, instead of a single bundle.\"\n .format(self.get_full_name()), len(self.inputs) <= 1)\n self.input_vectors = self.inputs.get_vectors().instances\n\n # indexes\n self.train_index = self.inputs.get_indices(role=roles.train)\n self.test_index = self.inputs.get_indices(role=roles.test)\n\n self.populate()\n self.input_dimension = self.input_vectors[0].shape[-1]\n self.compute()\n # set the outputs: transformed vectors and identical indices\n self.outputs.set_vectors(Numeric(vecs=self.vectors))\n self.outputs.set_indices(self.inputs.get_indices())",
"def train_and_eval_scutfbp(train_set_vector, test_set_vector, trainset_label, testset_label, testset_filenames):\n print(\"The shape of training set is {0}\".format(np.array(train_set_vector).shape))\n print(\"The shape of test set is {0}\".format(np.array(test_set_vector).shape))\n reg = linear_model.BayesianRidge()\n reg.fit(train_set_vector, trainset_label)\n\n predicted_label = reg.predict(test_set_vector)\n mae_lr = round(mean_absolute_error(testset_label, predicted_label), 4)\n rmse_lr = round(math.sqrt(mean_squared_error(testset_label, predicted_label)), 4)\n pc = round(np.corrcoef(testset_label, predicted_label)[0, 1], 4)\n print('===============The Mean Absolute Error of Model is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of Model is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of Model is {0}===================='.format(pc))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, './model/BayesRidge_SCUTFBP.pkl')\n print('The regression model has been persisted...')\n\n mkdirs_if_not_exist('./result')\n\n out_result(testset_filenames, predicted_label, testset_label, None, path='./result/Pred_GT_SCUTFBP.csv')\n\n df = pd.DataFrame([mae_lr, rmse_lr, pc])\n df.to_csv('./result/BayesRidge_SCUTFBP.csv', index=False)\n print('The result csv file has been generated...')",
"def __init__(self,training_data,default_kernel=\"rbf\"):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVC(class_weight='auto',cache_size=DEFAULT_CACHE_SIZE, kernel=default_kernel)\n self.classifier.fit(X, Y)",
"def train(self, x={}, **kwargs):\n return 0",
"def fit(self, features, targets):\n self.model_features = features\n self.model_targets= targets",
"def TrainingAlgorithm():\n urlFile = 'C:\\\\Users\\\\pokef\\\\Documents\\\\CybersecurityProg\\\\badandgoodurls.csv' # Replace this with data file path\n urlCSV = pd.read_csv(urlFile, ',',error_bad_lines=False)\n urlDF = pd.DataFrame(urlCSV)\n \n urlData = np.array(urlDF)\n \n labels = [d[1] for d in urlData]\n corpus = [d[0] for d in urlData]\n vectorizer = TfidfVectorizer(tokenizer=getTokens) # get a vector for each URL but use our tokenizer\n X = vectorizer.fit_transform(corpus)\n X_train, X_test, y_train, y_test = train_test_split(X, labels, test_size=0.2, random_state=42)\n \n logisticReg = LogisticRegression()\n logisticReg.fit(X_train, y_train)\n print(logisticReg.score(X_test, y_test)) # print accuracy score\n return vectorizer, logisticReg",
"def TrainOneStep(self):\n pass",
"def train(self, X):\n self.X = X",
"def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)",
"def predict(self, instances):\r\n raise NotImplementedError",
"def precompute(self, features, mode, params):\n return None",
"def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()",
"def main(argv):\n\n # Parse arguments and store in model_dict\n model_dict = svm_model_dict_create()\n DR = model_dict['dim_red']\n rev_flag = model_dict['rev']\n strat_flag = 1\n\n # Load dataset and create data_dict to store metadata\n print('Loading data...')\n dataset = model_dict['dataset']\n if (dataset == 'MNIST') or (dataset == 'GTSRB'):\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(\n model_dict)\n img_flag = None\n elif dataset == 'HAR':\n X_train, y_train, X_test, y_test = load_dataset(model_dict)\n img_flag = None\n # TODO: 2 classes case\n # if model_dict['classes'] == 2:\n # X_train = X_train\n\n data_dict = get_data_shape(X_train, X_test)\n n_features = data_dict['no_of_features']\n\n # Reshape dataset to have dimensions suitable for SVM\n X_train_flat = X_train.reshape(-1, n_features)\n X_test_flat = X_test.reshape(-1, n_features)\n # Center dataset with mean of training set\n mean = np.mean(X_train_flat, axis=0)\n X_train_flat -= mean\n X_test_flat -= mean\n\n # Create a new model or load an existing one\n clf = model_creator(model_dict, X_train_flat, y_train)\n model_tester(model_dict, clf, X_test_flat, y_test)\n\n # Assign parameters\n n_mag = 25 # No. of deviations to consider\n dev_list = np.linspace(0.1, 2.5, n_mag) # A list of deviations mag.\n if dataset == 'MNIST':\n rd_list = [784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10] # Reduced dimensions to use\n # rd_list = [784]\n elif dataset == 'HAR':\n rd_list = [561, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]\n # rd_list = [561]\n n_rd = len(rd_list)\n output_list = []\n clear_flag = None\n # Clear old output files\n if clear_flag ==1:\n abs_path_o = resolve_path_o(model_dict)\n _, fname = file_create(model_dict)\n os.remove(abs_path_o + fname + '.txt')\n _, fname = file_create(model_dict, rd=1, strat=strat_flag, rev=rev_flag)\n os.remove(abs_path_o + fname + '.txt')\n\n # Test clf against adv. 
samples\n print('Performing attack...')\n if model_dict['classes'] != 2:\n for i in range(n_mag):\n X_adv, y_ini = mult_cls_atk(clf, X_test_flat, mean, dev_list[i])\n output_list.append(acc_calc_all(clf, X_adv, y_test, y_ini))\n if img_flag != None:\n save_svm_images(model_dict, data_dict, X_test, X_adv,\n dev_list[i])\n fname = print_svm_output(model_dict, output_list, dev_list)\n # subprocess.call([\"gnuplot -e \\\"filename='{}.png'; in_name='{}.txt'\\\" gnu_in_loop.plg\".format(fname,fname)], shell=True)\n # else:\n # # TODO: 2 classes\n # print('TODO')\n\n # Retrain defense and strategic attack\n print('--------------Retrain Defense & Strategic Attack--------------')\n for rd in rd_list:\n output_list = []\n print('Reduced dimensions: {}'.format(rd))\n\n # Dimension reduce dataset and reshape\n X_train_dr, _, dr_alg = dr_wrapper(\n X_train_flat, X_test_flat, DR, rd, y_train, rev=rev_flag)\n\n # With dimension reduced dataset, create new model or load existing one\n clf = model_creator(model_dict, X_train_dr, y_train, rd, rev_flag)\n # Modify classifier to include transformation matrix\n clf = model_transform(model_dict, clf, dr_alg)\n\n model_tester(model_dict, clf, X_test_flat, y_test, rd, rev_flag)\n\n # rev_flag = 1\n # model_dict['rev'] = rev_flag\n # # Dimension reduce dataset and reshape\n # X_train_dr, _, dr_alg = dr_wrapper(\n # X_train_flat, X_test_flat, DR, rd, y_train, rev=rev_flag)\n #\n # # With dimension reduced dataset, create new model or load existing one\n # clf_1 = model_creator(model_dict, X_train_dr, y_train, rd, rev_flag)\n # # Modify classifier to include transformation matrix\n # clf_1 = model_transform(model_dict, clf_1, dr_alg)\n # # Test model on original data\n # model_tester(model_dict, clf_1, X_test_flat, y_test, rd, rev_flag)\n #\n # print clf_1.coef_[0]-clf.coef_[0]\n # print np.linalg.norm(clf_1.coef_[0]), np.linalg.norm(clf.coef_[0])\n # print np.dot(clf_1.coef_[0],clf.coef_[0])/(np.linalg.norm(clf_1.coef_[0])*np.linalg.norm(clf.coef_[0]))\n\n # Strategic attack: create new adv samples based on retrained clf\n print('Performing strategic attack...')\n for i in range(n_mag):\n X_adv, y_ini = mult_cls_atk(clf, X_test_flat, mean, dev_list[i])\n output_list.append(acc_calc_all(clf, X_adv, y_test, y_ini))\n if img_flag != None:\n save_svm_images(model_dict, data_dict, X_test_flat, X_adv,\n dev_list[i], rd, dr_alg, rev_flag)\n\n fname = print_svm_output(model_dict, output_list, dev_list, rd,\n strat_flag, rev_flag)\n\n # fname = dataset +'_' + fname\n subprocess.call(\n [\"gnuplot -e \\\"mname='{}'\\\" gnu_in_loop.plg\".format(fname)], shell=True)",
"def test_predictor():",
"def train_routine(training_file, output_folder):\n if output_folder[-1] != '/':\n output_folder += '/'\n\n svm_file = output_folder + 'svm.txt'\n centroid_file = output_folder + 'centroids.txt'\n ids_file = output_folder + 'ids.txt'\n\n surf = cv2.SURF(250, extended=False)\n categories = dict()\n ids = dict()\n id = 1\n features = list()\n\n print \"Extracting features\"\n for line in open(training_file):\n try:\n category, path = line.split(';')\n except:\n print \"Error: File not in proper format. Ensure: <category/class name>; <path to image of said category>\"\n sys.exit(0)\n path = path.strip()\n\n try:\n img = cv2.imread(path)\n #img = cv2.resize(img, (500, 500))\n except Exception as e:\n print e\n continue\n\n keypoints, descriptors = surf.detectAndCompute(img, None)\n\n if not category in categories:\n categories[category] = Category(label=category)\n ids[category] = id\n id += 1\n categories[category].add_feature(descriptors)\n\n #for category in categories:\n #f = categories[category].yield_features()\n ##features.extend(f)\n #for i in f:\n #features.extend(i)\n\n print \"Calculating centroids\"\n #np_features = numpy.array(features)\n #print \"Features: \", np_features.shape\n #centroids, labels = kmeans2(np_features, FEATURE_TYPES)\n centroids = helpers.loadObject(output_folder + 'centroids.txt')\n print centroids.shape\n\n print \"Forming bag of words\"\n X, Y = [], []\n for category in categories:\n categories[category].calc_bagofwords(centroids)\n for bow in categories[category].bagofwords:\n X.append(bow)\n Y.append(ids[category])\n print \"Fitting linear SVMs onto the bag of words\"\n lin_clf = svm.LinearSVC()\n lin_clf.fit(X, Y)\n\n helpers.saveObject(lin_clf, svm_file)\n helpers.saveObject(centroids, centroid_file)\n helpers.saveObject(ids, ids_file)",
"def train_model(self, text, labels):\n clf = svm.SVR()\n count_vect = CountVectorizer()\n tfidf_transformer = TfidfTransformer()\n counts = count_vect.fit_transform(text)\n tfidf = tfidf_transformer.fit_transform(counts)\n clf.fit(tfidf, labels)\n\n return clf, count_vect, tfidf_transformer",
"def train(self,features,y):\r\n \r\n if self.learn_type == \"nn\":\r\n #generate supervised dataset\r\n return(self.learner.train_on_batch(features,y))\r\n elif self.learn_type == \"linear\":\r\n grad = 0\r\n n = len(features)\r\n for i in range(n):\r\n #sum over the instances to get an estimate of the gradient\r\n print((y[i] - self.learner.activate(features[i])))\r\n grad -= (y[i] - self.learner.activate(features[i])) * \\\r\n self.learner.grad(features[i])\r\n grad /= n\r\n #update paramter\r\n param = np.copy(self.learner.param)\r\n self.learner.param = param - self.alpha * grad\r\n #print(self.learner.param)\r",
"def train_SGD():\n\n #load the data\n path_dataset = \"data/data_train.csv\"\n train = load_data(path_dataset)\n\n \"\"\"matrix factorization by SGD.\"\"\"\n\n #define parameters (optimal parameters from cross-validation)\n gamma = 0.12\n num_features = 25 \n lambda_user = 0.02 \n lambda_item = 0.24\n num_epochs = 100\n\n #run the factorization\n user_features, item_features = matrix_factorization_SGD(train, num_features, lambda_user, lambda_item, num_epochs, gamma)\n\n #save item_features_SGD\n file = open(\"data/item_features_SGD.obj\",\"wb\")\n pickle.dump(item_features,file)\n file.close()\n\n #save user_features_SGD\n file = open(\"data/user_features_SGD.obj\",\"wb\")\n pickle.dump(user_features,file)\n file.close()",
"def custom_training(nb_tweet_sample, randomised, equal_pos_neg, language, name_kernel, Resource, keep_null_vector):\n m_features, m_labels = get_characteristic_label_vectors(nb_tweet_sample, randomised, equal_pos_neg, Resource,\n keep_null_vector, language)\n\n kernel = Kernel.get_correct_kernel(name_kernel)\n custom_SVM = SVM(kernel)\n custom_SVM.fit(m_features, m_labels)\n\n return custom_SVM",
"def forward(self, features):\n outputs = {} \n #features = self.bn(self.linear(features))\n for i in range(len(self.module_list)): \n x = self.module_list[i](features)\n outputs[i] = x\n\n return outputs",
"def my_impl_variational(in_train, in_test, labels):\n X_train = []\n X_test = []\n for lab in labels:\n for datum in in_train[lab]:\n X_train.append([datum, lab])\n for datum in in_test[lab]:\n X_test.append([datum, lab])\n Variationer_learn(X_train, 500, 1, 0.01, X_test, labels)",
"def training(train_data, dev_data, param):\n text_to_vec = TextToVec(**param)\n\n # Fit with both train and dev data\n text_to_vec.fit(train_data['data'] + dev_data['data'])\n word_vec_map = text_to_vec.vectorizer.get_feature_names()\n train_vec = text_to_vec.transform(train_data['data'])\n dev_vec = text_to_vec.transform(dev_data['data'])\n logger.info(f\"train vec size:{train_vec.shape}, dev vec size:{dev_vec.shape}\")\n\n # # apply weights on tfidf based on whether the word appear in multiple classes\n # tt_occ = Counter(train_data['encoded_label'])\n # weight_list = []\n # for i in range(train_vec.shape[1]): # For every feature\n # occ = Counter(train_data['encoded_label'][train_vec[:, i] > 0.0])\n # for key, value in occ.items():\n # occ[key] = value/tt_occ[key]\n # weight_list.append(np.std(list(occ.values()))/0.35)\n # weight = np.array(weight_list).reshape(1, -1)\n # weight = weight/np.max(weight)\n # train_vec = np.multiply(train_vec, weight)\n\n # Perform oversampling on training data\n if param['balanced'] not in ['Bootstrap', 'Handsample']:\n logger.info(f\"class info before resampling: {sorted(Counter(train_data['encoded_label']).items())}\")\n train_vec, train_data['encoded_label'] = resample(X_train=train_vec, y_train=train_data['encoded_label'], balance=param['balanced'])\n logger.info(f\"class info after resampling:{sorted(Counter(train_data['encoded_label']).items())}\")\n\n # Fit model\n if param['classifier'] == 'MultinomialNB':\n clf = MultinomialNB()\n elif param['classifier'] == 'LDA':\n clf = LinearDiscriminantAnalysis()\n else:\n clf = svm.LinearSVC()\n\n if param['multiclass'] == 'OnevsOne':\n model = OneVsOneClassifier(clf)\n else:\n model = OneVsRestClassifier(clf)\n\n if param['classifier'] == 'LinearSVM' or param['multiclass'] == 'OnevsOne':\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['encoded_label'])\n train_prediction = model.predict(train_vec)\n dev_prediction = model.predict(dev_vec)\n else:\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['binary_label'])\n train_prediction = np.argmax(model.predict(train_vec), axis=1)\n dev_prediction = np.argmax(model.predict(dev_vec), axis=1)\n\n\n return train_prediction, dev_prediction, train_vec.shape, dev_vec.shape, model, word_vec_map",
"def __init__(self, train_x, train_y, test_x, test_y, Tunning_Cs=[0.001, 0.01, 0.1, 1, 10]): \n self.Cs = Tunning_Cs\n self.train_x = train_x\n self.train_y = train_y\n self.test_x = test_x \n self.test_y = test_y\n self.model = svm.SVR(kernel='rbf', gamma='auto')",
"def __init__(self, tokens):\n self.mdl = self.train(tokens)",
"def __init__(self, tokens):\n self.mdl = self.train(tokens)"
] |
[
"0.7079623",
"0.6985331",
"0.6951296",
"0.68263084",
"0.67677474",
"0.6763887",
"0.67388266",
"0.66454804",
"0.66344404",
"0.6605667",
"0.6589454",
"0.65607995",
"0.6531744",
"0.6518474",
"0.6516882",
"0.64959514",
"0.6491756",
"0.64540136",
"0.64505",
"0.64383566",
"0.64315933",
"0.64315933",
"0.64315933",
"0.64315933",
"0.64315933",
"0.640507",
"0.63803285",
"0.63614905",
"0.63611233",
"0.6356435",
"0.6352819",
"0.63250226",
"0.63128287",
"0.62993646",
"0.6290715",
"0.6250527",
"0.62410885",
"0.62382317",
"0.6237372",
"0.62127465",
"0.6198232",
"0.6192141",
"0.618729",
"0.6184789",
"0.6166186",
"0.6162354",
"0.61621904",
"0.61587787",
"0.61583745",
"0.6157381",
"0.61507726",
"0.6150383",
"0.61498",
"0.61399984",
"0.6134347",
"0.6130277",
"0.6127967",
"0.6127789",
"0.61260706",
"0.6117015",
"0.611681",
"0.611681",
"0.6112231",
"0.61056507",
"0.61006236",
"0.60810494",
"0.608",
"0.6078148",
"0.60769707",
"0.6068044",
"0.6063198",
"0.60595894",
"0.60524917",
"0.60394126",
"0.60245454",
"0.60171914",
"0.6016649",
"0.6016177",
"0.60101944",
"0.6005842",
"0.60057724",
"0.6005111",
"0.60049474",
"0.60013705",
"0.6000178",
"0.5997376",
"0.59946525",
"0.5992644",
"0.59909016",
"0.59884584",
"0.5978226",
"0.59776425",
"0.59748894",
"0.5974661",
"0.5974322",
"0.59738386",
"0.5973598",
"0.59693646",
"0.5969187",
"0.5969187"
] |
0.700062
|
1
|
Predict labels from trained CLF
|
def predict(clf, features):
    # Return the classifier's predictions as plain integer labels
    # (np.int was removed in NumPy >= 1.24; astype(int) is the portable spelling).
    return clf.predict(features).astype(int)
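A minimal usage sketch for the helper above, not part of the original record: it assumes a scikit-learn style classifier exposing fit/predict, and the names X_train, y_train, X_test, and clf below are illustrative only.

import numpy as np
from sklearn import svm

X_train = np.random.rand(20, 4)          # toy training features
y_train = np.array([0] * 10 + [1] * 10)  # toy binary labels (both classes present)
X_test = np.random.rand(5, 4)            # toy held-out features

clf = svm.SVC()                          # any estimator with a .predict() method works
clf.fit(X_train, y_train)
print(predict(clf, X_test))              # integer label predictions from the helper above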
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def predict_label(self, src): # real signature unknown; restored from __doc__\n pass",
"def _predict(self, X):\n predictions = np.asarray([clf.predict(X) for clf in self.clfs_]).T\n predicted_labels = self.combiner.combine(predictions)\n return predicted_labels",
"def predict(self, X_test):\n\n # Predict Label 0\n i = 0\n X = X_test\n\n # Retrieve trained classifier for label 0\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n result = np.zeros((X_test.shape[0], self.label_dim))\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n # iterator = tqdm.trange(1, self.label_dim)\n iterator = range(1, self.label_dim)\n for i in iterator:\n # Predict Label i\n\n # Retrieve trained classifier for label i\n clf = self.trained[i]\n\n # Make prediction\n y = clf.predict(X)\n\n result[:, i] = y\n\n # Concatenate result to X\n # X = sp.hstack([X, sp.csr_matrix(y)], format=\"csr\")\n\n return result",
"def PredictClerLabel(sentence, model_cler, word2vec):\n \n tokenized_sample = word_tokenize(re.sub(\"-\",\" \",sentence))\n features = np.mean([word2vec.word_vec(w) for w in tokenized_sample if w in word2vec],axis=0)\n prediction = model_cler.predict_proba(features.reshape(1,-1))[0]\n return model_cler.classes_[prediction.argmax()]",
"def predict(self, X): \n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n # Input validation\n X = check_array(X)\n\n j= 0\n predicted_labels = np.array([])\n while(j < X.shape[0]):\n current_batch_end = j+self.batch_size if j+self.batch_size < X.shape[0] else X.shape[0]\n current_batch = X[j:current_batch_end]\n self._feedforward(current_batch)\n predicted_labels = np.append(predicted_labels, np.take(self.map_labels, self.bmu_indices))\n j = current_batch_end\n \n return predicted_labels",
"def predict(self, features):\n vec = vectorize(features, self.vocab,\n self.dpvocab, self.projmat)\n label = self.clf.predict(vec)\n # print label\n return self.labelmap[label[0]]",
"def predict(self, inputs):\n if self.use_logistic:\n return self.predict_labels_logistic(self.w, inputs)\n return predict_labels(self.w, inputs)",
"def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]",
"def predict(self, X):\n prob = self.predict_proba(X)\n if self.rule == 'fda':\n prob_1 = prob[:, :self.n_class_]\n prob_2 = prob[:, self.n_class_:]\n return np.vstack((self.labels_[prob_1.argmax(1)], self.labels_[prob_2.argmax(1)]))\n else:\n return self.labels_[prob.argmax(1)]",
"def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_",
"def fit_predict(self, X, y=None):\n self.fit(X)\n return self.labels_",
"def predict_labels(clf, features, target):\n\n # Start the clock, make predictions, then stop the clock\n start = time()\n y_pred = clf.predict(features)\n end = time()\n # Print and return results\n print(\"Made predictions in {:.4f} seconds\".format(end - start))\n return accuracy_score(target, y_pred)",
"def predict(self):\n self.get_test_data()\n predicted_labels = []\n for row in self.test_data:\n predicted_labels.append(DecisionTree.predict_row(self.classifier, row))\n return predicted_labels",
"def predict(self, X):",
"def predict(self, X):",
"def predict_labels(model, x_test):\n \n pred = model.predict(x_test)\n #pred_labels = model.predict_classes(x_test) # depricated\n pred_labels = np.argmax(model.predict(x_test), axis=-1)\n \n return pred, pred_labels",
"def predict_only(self):",
"def test_predict(self):\n\n classifier = BertCCAMClassifier()\n classifier.load_model(\"models\")\n prediction = classifier.predict([\"bartosz\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}])\n\n # with multiple labels\n prediction = classifier.predict([\"ala bert\"])\n self.assertEqual(prediction, [{\"labels\": (\"A\", \"B\")}])\n\n # in a batch\n prediction = classifier.predict([\"bartosz\", \"adam\"])\n self.assertEqual(prediction, [{\"labels\": (\"B\",)}, {\"labels\": (\"A\",)}])",
"def predict(model, X):\n\tmodel.eval()\n\t# make the predictions\n\tscores = model.forward(X)\n\n\t# scores contains, for each example, two scores that can be interpreted as the\n\t# probability of each example belonging to each of the classes. To select the\n\t# final predicted label, we will select the class with higher probability.\n\tpredicted_labels = scores.argmax(dim=-1) # predicted_labels shape: (n_examples)\n\n\treturn predicted_labels",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, X):\n ...",
"def predict(self, features):\n feature_labels = []\n for f in features:\n get_label = self.get_k_neighbors(f)\n c0 = get_label.count(0)\n c1 = get_label.count(1)\n if c0 >= c1:\n f_label = 0\n else:\n f_label = 1\n feature_labels.append(f_label)\n return feature_labels\n raise NotImplementedError",
"def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]",
"def predict(self, predPoints=None):",
"def predict(self):\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model = tf.keras.models.load_model(path)\n\n _, _, x_test, y_test = self._load_data()\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n preds = model.predict(x_test)\n self._show_cf_matrix(np.array([np.argmax(probas) for probas in preds]), y_test)",
"def predict(self):\n raise NotImplementedError",
"def predictAuthors(training_fvs, labels, test_fvs):\n clf = MultinomialNB()\n clf.fit(training_fvs, labels)\n return clf.predict(test_fvs)",
"def predict(self, test_set, test_labels):\n\n with tf.Session() as self.tf_session:\n self.tf_saver.restore(self.tf_session, self.models_dir + self.model_name)\n return self.accuracy.eval({self.input_data: test_set, self.input_labels: test_labels})",
"def predict(self, X, pred_batch_size=None):",
"def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})",
"def clf1_predict(self):\n self._pred_clf_1 = self._clf1.predict(self._vectorized_input)[0]",
"def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class)",
"def _predict_label(self, df_train, df_test, label=None):\n #train k-nearest neighbors classifier \n neigh = KNeighborsClassifier(n_neighbors=5)\n X, y = df_train[['longitude', 'latitude']], df_train[label]\n neigh.fit(X, y)\n #predict the label for wildfire incidents\n pred_label = neigh.predict(df_test[['longitude', 'latitude']])\n return pred_label",
"def predict_cli(text, config_filepath):\n load_classifier(config_filepath)\n print(classifier.predict(text))",
"def predict(self):\n train_array = np.array(self.labels != 0, dtype=float)\n if not self.ising:\n labels_logit = self.ising_weights['vertices']\n else:\n neigh_num = self.adj.dot(train_array)\n neigh_num = np.where(neigh_num == 0, 1, neigh_num)\n neigh_weights = self.ising_weights['edges'] * self.labels\n labels_logit = (np.multiply(neigh_weights, neigh_num**(-1))\n + self.ising_weights['vertices'])\n self.prediction = np.where(labels_logit > 0, 1, -1)\n return self",
"def predict(self, eval_features):\n\t\tinput_ids = torch.tensor(eval_features.input_ids, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\tinput_mask = torch.tensor(eval_features.input_mask, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\tsegment_ids = torch.tensor(eval_features.segment_ids, dtype=torch.long).to(self.device).unsqueeze(0)\n\t\t\n\t\twith torch.no_grad():\n\t\t\tlogits = self.model(input_ids, segment_ids, input_mask)\n\t\t\tlogits = logits.to(\"cpu\")\n\t\t\tsoftmax_logits = F.softmax(logits[0], dim=0).numpy()\n\t\t\tprint(\"softmax score : \", softmax_logits)\n# final_logits = list(zip(list(map(lambda x : self.reverse_label_map[np.ravel(np.where(softmax_logits==x))[0]], softmax_logits )), softmax_logits))\n\t\tpred = np.argmax(softmax_logits)\n\t\tprob = np.max(softmax_logits)\n\t\t\n\t\treturn pred , prob",
"def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds",
"def predict(self, test_file_path: str) -> List[Dict[str, float]]:\n # TODO write code to extract features from test_file_path and \n # predict the labels for the model.\n pass",
"def predict_class(clf, X_test, Y_test, labels=None, stats_fname=None):\n expected = Y_test\n if isinstance(clf, KerasModel):\n char_probs = clf.predict(X_test)\n predicted = np.argmax(char_probs, axis=1)\n\n if len(Y_test.shape) > 1:\n expected = np.argmax(Y_test, axis=1)\n else:\n predicted = clf.predict(X_test)\n\n conf_mat = metrics.confusion_matrix(\n expected, predicted, labels=range(len(labels))\n )\n\n stats = {\n 'Accuracy': metrics.accuracy_score(expected, predicted),\n 'F1': metrics.f1_score(expected, predicted, average='weighted'),\n 'Precision': metrics.precision_score(expected, predicted,\n average='weighted'),\n 'Recall': metrics.recall_score(expected, predicted,\n average='weighted')\n }\n print('Accuracy: %f' % stats['Accuracy'])\n print('F1: %f' % stats['F1'])\n print('percision: %f' % stats['Precision'])\n print('recall: %f' % stats['Recall'])\n\n save_conf_mat(conf_mat, stats, labels, stats_fname)\n\n return predicted",
"def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)",
"def make_predictions(df):\n t_labels = get_labels(\"labels_pca\")\n # clean data\n df = clean_data(df)\n # engineer data\n df = engineer_features(df)\n # predict\n with open(\"model.pkl\",\"r\") as mdl:\n model = pickle.load(mdl)\n mdl.close()\n predictions = model.predict(df[t_labels])\n return predictions",
"def predict_cf(self, x, **kwargs):\n pass",
"def predict_category(self):\n pass",
"def predict(self, x):\n pred_labels = np.zeros((x.shape[0], 10))\n\n N = len(self.NET)\n for i in range(N):\n\n inputs = self.apply_dct_permutation(x.copy(), self.permutation[i])\n pred_labels += self.NET[i].model.predict(inputs)\n\n return pred_labels",
"def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds",
"def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predict(self, X):\n pass",
"def predict(cls, input):\n clf = cls.get_model() \n\n input.to_csv(data_dir + 'vdok_predction_src_file.csv')\n\n q = qa_serializer_lang_selector(data_dir)\n q.serialize_record('vdok_predction_src_file.csv', task_name)\n q.select_lang([1], task_name).to_csv(data_dir + data_file, encoding= 'latin1')\n\n pipeline=['pos', 'lemma', 'synset', 'hype', 'hypo']\n\n bnlqd = fex_basic_nlp(data_file, data_dir)\n bnlqd.nlp_run(pipeline[0])\n bnlqd.nlp_run(pipeline[1])\n bnlqd.df_ac_lemma.to_csv(data_dir + 'Lemma-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[2])\n bnlqd.df_ac_synset.to_csv(data_dir + 'Synset-' + data_file , encoding= 'latin1')\n bnlqd.nlp_run(pipeline[3])\n bnlqd.df_ac_hypernyms.to_csv(data_dir + 'Hypernyms-' + data_file, encoding= 'latin1')\n bnlqd.nlp_run(pipeline[4])\n bnlqd.df_ac_hyponyms.to_csv(data_dir + 'Hyponyms-' + data_file, encoding= 'latin1')\n\n bnlpd = fex_basic_nlp(def_file, data_dir, task_name)\n bnlpd.nlp_run(pipeline[0])\n bnlpd.nlp_run(pipeline[1])\n bnlpd.df_ac_lemma.to_csv(data_dir + 'Lemma-P-' + data_file, encoding= 'latin1')\n \n btgqd = bi_trigram(data_file, data_dir)\n btgqd.nlp_run(r'bigram')\n btgqd.nlp_run(r'trigram') \n\n stop_words_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words)\n\n oanc_shelve = oanc_resource + 'ANC-all-lemma-04262014.db'\n oalqd = odi_oanc_lemma_frequency(data_file, oanc_shelve, None, data_dir, stop_words_d) \n oalqd.oanc_lemma_frequency('Lemma-' + data_file, 'Student_Question_Index', 'Pre_Col_Name')\n \n stop_words_hy_d = cls.remove_non_extracted_stop_word(bnlqd.df_ac_lemma, stop_words_hy)\n\n ovlqd = odi_overlapping(data_file, def_file, data_dir, stop_words_d)\n ovlqd.count_overlapping('Lemma-' + data_file, 'Student_Question_Index',\n 'Pre_Col_Name', 'Question_ID', 'Question_ID_Sec',\n 'Lemma-P-' + data_file, 'Question_ID', 'Question_ID_Sec')\n ovlqd.count_overlapping_synset('Synset-' + data_file)\n ovlqd.count_overlapping_hypernyms('Hypernyms-' + data_file, stop_words_hy_d)\n ovlqd.count_overlapping_hyponyms('Hyponyms-' + data_file, stop_words_hy_d)\n\n df_ac_pmi_dist_bigram = cls.bi_trigram_pmi_distribution(pmi_bigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_bigram, 'bigram')\n df_ac_pmi_dist_trigram = cls.bi_trigram_pmi_distribution(pmi_trigram_file, data_dir, \n bnlqd.num_clm_in, btgqd.df_ac_trigram, 'Trigram')\n\n df_ac_aggregate = cls.aggregate_plim(bnlqd, oalqd, ovlqd, df_ac_pmi_dist_bigram, df_ac_pmi_dist_trigram,\n bnlpd, specific_count_lemmas, stop_words_pos, task_name)\n df_ac_aggregate.to_csv(data_dir + 'vdok_predction_Aggregate_plim.csv', encoding= 'latin1')\n df_ac_aggregate_item_level = cls.aggregate_item_level_plim(df_ac_aggregate, oalqd.stem_option_name_clm, \n task_name)\n df_ac_aggregate_item_level.to_csv(data_dir + 'vdok_predction_Key_Stem_Passage_Aggregate_plim.csv',\n encoding= 'latin1')\n\n rfrpod = tmv_RF_classify('Independent_Variable_w_Label-Def.csv', data_dir)\n rfrpod.load_data('vdok_predction_Key_Stem_Passage_Aggregate_plim.csv', True, drop_vars, dependent_var)\n clf.perform_prediction(rfrpod.df_ac_modeling_values)\n return clf.df_ac_classified",
"def predict(self, X):\r\n num_test = X.shape[0]\r\n # lets make sure that the output type matches the input type\r\n Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\r\n\r\n # loop over all test rows\r\n for i in range(num_test):\r\n print (\"Testing example \" + str(i))\r\n distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1)\r\n # distances = self.chi2_distance(self.Xtr, X[i,:])\r\n min_index = np.argmin(distances) # get the index with smallest distance\r\n Ypred[i] = self.ytr[min_index] # predict the label of the nearest example\r\n print (\"Class Label: \" + str(Yte[i]) + \" \" + \"Predicted label: \" + str(Ypred[i]))\r\n return Ypred",
"def predict():\n to_predict = np.zeros(5).reshape(1, 5)\n features = ['is_male', 'num_interactions_with_cust_service', 'late_on_payment', 'age', 'years_in_contract']\n for i, feat in enumerate(features):\n if request.args.get(feat) is not None:\n to_predict[0][i] = request.args.get(feat)\n\n response = clf2.predict(to_predict)\n\n if response:\n return \"The customer is likely to churn\"\n else:\n return \"He is a loyal customer\"",
"def predict(toPredict=None):\n\tif not toPredict:\n\t\ttoPredict = gc.generate_test_cases(1)[0]\n\ttoPredict = np.asarray(toPredict)\n\ttoPredict = toPredict.reshape(1, -1)\t\n\tclusterer = joblib.load(\"training_data.pkl\")\n\tprint(clusterer.predict(toPredict))",
"def predict_labels(model):\n test_datagen = ImageDataGenerator(featurewise_center=True,\n featurewise_std_normalization=True\n #rescale=1. / 255,\n #samplewise_center=True,\n #samplewise_std_normalization=True\n )\n test_datagen.fit(test_data)\n # datagen.fit(val_data)\n # create generator for train data\n test_generator = test_datagen.flow(\n test_data,\n batch_size=batch_size,\n shuffle=False)\n pred_prob=model.predict_generator(test_generator,test_data.shape[0])\n pred_prob=pred_prob[:,0]\n def pre_class(x):\n \tif x<0.5:\n return 0\n else:\n return 1\n #def true_label(id):\n #\tif 'f0' in id:\n #\t return 0\n # elif 'f1' in id: \n # return 1\n #\telse:\n #\t pass\n #pred_true=map(true_label,test_id)\n #pred_true=np.array(pred_true)\n #print roc_auc_score(val_target, pred_prob)\n #prediction=map(pre_class,pred_prob)\n #print confusion_matrix(val_target,prediction)\n with open(\"prediction.csv\", \"w\") as f: \n\tp_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n for id,label in zip(test_id,pred_prob):\n\t p_writer.writerow([id, label])\n\t\n #base_path = \"PZ/test/test/\"\n\n #with open(\"prediction.csv\", \"w\") as f:\n # p_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n # for _, _, imgs in os.walk(base_path):\n # for im in imgs:\n # pic_id = im.split(\".\")[0]\n #img = cv2.imread(base_path+im)\n #img = cv2.resize(img, (img_width, img_height), cv2.INTER_LINEAR)\n #img = img.transpose((2,0,1))\n #img = np.expand_dims(img,axis=0)\n #img = load_img(base_path + im)\n #img = imresize(img, size=(img_height, img_width))\n #test_x = img_to_array(img).reshape(3, img_height, img_width)\n #test_x = test_x.reshape((1,) + test_x.shape)\n #test_datagen.fit(img)\n #test_generator = test_datagen.flow(img,\n # batch_size=1,\n # shuffle=False)\n #prediction = model.predict_generator(test_generator, 1)\n #p_writer.writerow([pic_id, prediction])",
"def predict(wav, labels, graph, input_name, output_name, how_many_labels):\n pred_lab, pred_prob=label_wav(wav, labels, graph, input_name, output_name, how_many_labels)\n return pred_lab, pred_prob",
"def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class",
"def test_text_classifier_get_labels(self):\n pass",
"def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label",
"def predict(self, instances):\r\n raise NotImplementedError",
"def predict_label(examples_set):\n all_labels = list(('yes', 'no'))\n prediction = 'no'\n\n for label in all_labels:\n all_same_label = True\n for example in examples_set:\n if example[14] != label:\n all_same_label = False\n break\n if all_same_label:\n prediction = label\n break\n return prediction",
"def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels",
"def predict(self, testFeatures): \r\n\r\n if(not self._fitCalled):\r\n print('The fit method has not been called yet')\r\n return None\r\n\r\n l,d = testFeatures.shape\r\n n,d = self.data.shape \r\n\r\n \"\"\" Fill and return this in your implementation. \"\"\"\r\n predictions = np.empty(shape=(l,), dtype=self.labels.dtype)\r\n\r\n \"\"\" Implement kNN prediction here. \"\"\"\r\n\r\n for i in range(0, l):\r\n distances = []\r\n for j in range(0, n):\r\n distances.append((np.sqrt(np.sum((testFeatures[i]-self.data[j])**2)), self.labels[j]))\r\n distances.sort()\r\n kNearestLabels = [x[1] for x in distances][0:self.k]\r\n most_common, num_most_common = Counter(kNearestLabels).most_common(1)[0]\r\n predictions[i] = most_common\r\n return predictions",
"def predict():\n import trace\n trace.predict()",
"def predict(self, conf):\n conf.set_int(\"angel.worker.matrix.transfer.request.timeout.ms\", 60000)\n predict(conf, conf._jvm.com.tencent.angel.ml.classification.lr.LRModel(conf._jconf, None), 'com.tencent.angel.ml.classification.lr.LRPredictTask')",
"def predict(self, texts):\n\n texts = preprocess_text(texts)\n labels = self.pipeline.predict(texts)\n\n return labels",
"def predict(self, X):\n num_test = X.shape[0]\n # lets make sure that the output type matches the input type\n Ypred = np.zeros(num_test, dtype = self.ytr.dtype)\n\n # loop over all test rows\n for i in range(num_test):\n # find the nearest training image to the i'th test image\n # using the L1 distance (sum of absolute value differences)\n #distances = np.sum(np.abs(self.Xtr - X[i,:]), axis = 1) # L1 distance\n distances = np.sqrt(np.sum(np.square(self.Xtr - X[i, :]), axis=1)) # L2 distance\n min_index = np.argmin(distances) # get the index with smallest distance\n Ypred[i] = self.ytr[min_index] # predict the label of the nearest example\n\n return Ypred",
"def predict_estimator(self, X, y=None, labels=[0, 1]):\n # throw an exception if classifier is not trained\n if not self.classifier_trained:\n raise Exception(\"Train estimator first\")\n if len(X) == 1: # predict doesn't work if only one element\n X.append(\"\")\n labels = labels\n result = []\n input_features = self.sentences_to_features(\n X, [0 for i in range(len(X))])\n predict_input_fn = run_classifier.input_fn_builder(features=input_features,\n seq_length=self.params[\"MAX_SEQ_LENGTH\"],\n is_training=False, drop_remainder=False)\n predictions = self.estimator.predict(predict_input_fn)\n\n # output : tuple (sentence, probability of each class, predicted class)\n for sentence, prediction in zip(X, predictions):\n result.append(\n (sentence, np.exp(prediction['probabilities']), labels[prediction['labels']]))\n\n if y is not None:\n pred = [result[i][2] for i in range(len(result))]\n print(\"Accuracy: %s\" % accuracy_score(y, pred))\n print(\"Precision: %s\" % precision_score(y, pred))\n print(\"Recall: %s\" % recall_score(y, pred))\n print(\"f1: %s\" % f1_score(y, pred))\n print(confusion_matrix(y, pred))\n\n return result",
"def label_predict(self, sentence):\n index_words = FileUtils.index_sentence(sentence, self.word_to_index)\n chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)\n result = np.zeros(Settings.class_num)\n if Settings.cuda:\n self.model.cuda()\n \n for chunk in chunks:\n with torch.no_grad():\n chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)\n if Settings.cuda:\n chunk = chunk.cuda()\n \n predict = self.model(chunk)\n if Settings.cuda:\n predict = predict.cpu()\n predict = predict.numpy()[0]\n result += predict\n result /= len(chunks)\n\n target_index = np.argmax(result) + 1\n label = self.index_to_label.get(str(target_index))\n score = np.max(result)\n return label, score",
"def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')",
"def predict(self, X):\n\n y_pred = np.zeros(X.shape[0])\n y_pred = np.argmax(np.dot(X,self.W), axis=1)\n ###########################################################################\n # Implement this method. Store the predicted labels in y_pred. #\n ###########################################################################\n\n return y_pred",
"def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]",
"def predict(self):\n for track in self.tracks:\n track.predict(self.kf)\n #track.du_doan(self.kf_test)",
"def gen_labels(loader, model):\r\n y_true, y_pred = [], []\r\n for X, y in loader:\r\n with torch.no_grad():\r\n output = model(X)\r\n predicted = predictions(output.data)\r\n y_true = np.append(y_true, y.numpy())\r\n y_pred = np.append(y_pred, predicted.numpy())\r\n return y_true, y_pred",
"def predict(self, instances):\n\n if self.w is None or self.b is None:\n raise ValueError('Must train learner before prediction.')\n\n fvs, _ = TRIMLearner.get_fv_matrix_and_labels(instances)\n\n labels = fvs.dot(self.w) + self.b\n labels = list(map(lambda x: 1 if x >= 0 else -1, labels))\n\n return labels",
"def predictTest(k, train, test):\r\n\r\n pred_labels = []\r\n\r\n # for each instance in the testing dataset, calculate all L2 distance from all training instances\r\n for te in range(len(test)):\r\n all_D = np.zeros((len(train), 1))\r\n\r\n # calculate the L2 distance of the testing instance from each training instance\r\n for tr in range(len(train)):\r\n D = 0\r\n for var in range(len(train.columns)-1):\r\n # if feature is real-valued, add (testing value - training value)^2\r\n if train[var].dtype == np.float64 or train[var].dtype == np.int64:\r\n D += (test[var][te] - train[var][tr])**2\r\n # if feature is nominal, add 1 if testing and training values are different\r\n else:\r\n if test[var][te] != train[var][tr]:\r\n D += 1\r\n all_D[tr] = D**(1/2)\r\n\r\n # sort all L2 distances, select K closest neighbors, and choose the most prevalent label\r\n all_D = np.column_stack((all_D, np.array(range(len(train)))))\r\n all_D = all_D[np.argsort(all_D[:, 0])]\r\n prob_labels = train[len(train.columns)-1][all_D[0:k, 1]].as_matrix()\r\n pred_labels.append(Counter(prob_labels).most_common(1)[0][0])\r\n\r\n return pred_labels",
"def predict(self, data: List):",
"def predict(self, X, y=None):\n # Check is fit had been called\n check_is_fitted(self, ['X_', 'y_'])\n\n _, Predicted_Labels =\\\n RankSVM_test(test_data=X,\n num_class=self.num_class,\n Weights=self.Weights,\n Bias=self.Bias,\n SVs=self.SVs,\n svm=self.svm, gamma=self.gamma,\n coefficient=self.coefficient,\n degree=self.degree)\n\n return Predicted_Labels",
"def getPredictions(self):\n\t\tself.bestLabel = self.testingProbs.apply(lambda x: x.argmax(),1)",
"def example():\n\n log.info(\"LFW prediction example\")\n\n class_id_to_label_mapping, label_to_class_id_mapping = load_class_id_to_label_mapping(\"lfw\")\n classes = len(class_id_to_label_mapping.keys())\n\n with Predictor(\"lfw\", (250, 250, 3), classes) as p:\n # Predict the first image\n colin_image = Path(LFW_DIR) / Path(\"Colin_Powell/Colin_Powell_0023.jpg\")\n image = cv.imread(str(colin_image))\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n x_test = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])\n\n predicted_class_id = p.predict(x_test)\n predicted_class_id = int(np.squeeze(predicted_class_id))\n\n log.info(\"Prediction: %s\" % (class_id_to_label_mapping[str(predicted_class_id)]))\n\n # Predict second image\n colin_image2 = Path(LFW_DIR) / Path(\"Colin_Powell/Colin_Powell_0024.jpg\")\n image = cv.imread(str(colin_image2))\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n x_test = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])\n\n predicted_class_id = p.predict(x_test)\n predicted_class_id = int(np.squeeze(predicted_class_id))\n\n log.info(\"Prediction: %s\" % (class_id_to_label_mapping[str(predicted_class_id)]))\n\n with Predictor(\"lfw\", (250, 250, 3), classes) as p:\n # Predict the third image in a different session\n colin_image2 = Path(LFW_DIR) / Path(\"Colin_Powell/Colin_Powell_0025.jpg\")\n image = cv.imread(str(colin_image2))\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n x_test = image.reshape(1, image.shape[0], image.shape[1], image.shape[2])\n\n predicted_class_id = p.predict(x_test)\n predicted_class_id = int(np.squeeze(predicted_class_id))\n\n log.info(\"Prediction: %s\" % (class_id_to_label_mapping[str(predicted_class_id)]))",
"def predict(cls, input):\n clf = cls.get_model()\n print('input=')\n print(input)\n return clf.predict(input)",
"def predict(self, model, context, data):\n pass",
"def predict_from(self, inputs, to_layers):",
"def predict(self, test_vectors):\n # Calculate the best matching label for each node\n if not self.ready_for_prediction:\n # totals = sum((node.labels for node in self.codebook), Counter())\n for node in self.codebook:\n # Remove unlabeled hits\n try:\n node.labels.pop(None)\n except KeyError:\n pass\n # # Take into account small clusters. A frequency approach\n # freq_counter = Counter({label: count / totals[label]\n # for label, count in node.labels.items()})\n # if len(freq_counter) > 0:\n # node.label = freq_counter.most_common(1)[0][0]\n # else:\n # node.label = ''\n # Or ignore small clusters and just aim for accuracy\n if len(node.labels) > 0:\n node.label = node.labels.most_common(1)[0][0]\n else:\n node.label = ''\n self.ready_for_prediction = True\n\n # Return the label of the best matching unit for the given test_vectors\n if isinstance(test_vectors, collections.Iterable):\n return [self.bmu(test_vector).label for test_vector in test_vectors]\n else:\n return self.bmu(test_vectors).label",
"def predict(self, df):\n # TODO: REMOVE type column\n\n tokenizer = self.__create_tokenizer_from_hub_module()\n label_list = test_other[LABEL_COLUMN].unique().tolist()\n #label_list = [0, 1]\n test_features = self.__create_features(\n df, label_list,\n self.max_seq_len, tokenizer, 'text', 'type'\n )\n\n preds = []\n if type(self.model) == tf.estimator.Estimator:\n # Is trained\n input_fn = input_fn_builder(\n features=test_features,\n seq_length=self.max_seq_len,\n is_training=False,\n drop_remainder=False)\n pred = self.model.predict(input_fn=input_fn)\n for p in pred:\n preds.append(p)\n else:\n # Is loaded from a SavedModel\n # Format inputs\n inpu = {\n 'label_ids': np.array([x.label_id for x in test_features]).reshape(-1,),\n 'input_ids': np.array([x.input_ids for x in test_features]).reshape(-1, self.max_seq_len),\n 'input_mask': np.array([x.input_mask for x in test_features]).reshape(-1, self.max_seq_len),\n 'segment_ids': np.array([x.segment_ids for x in test_features]).reshape(-1, self.max_seq_len)\n }\n preds = self.model(inpu)\n\n return preds",
"def predict(self, X, k=1):\n dists = self.compute_distances(X)\n return self.predict_labels(dists, k=k)",
"def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes",
"def model_predict(classifier, X_test:list) -> list:\n y_predict = classifier.predict(X_test)\n return y_predict",
"def predict_labels(self, x: list, decode=\"posterior\"):\n assert decode in self.decode, \"decode `{}` is not valid\".format(decode)\n \n if decode is 'posterior':\n return self.posterior_decode(x)\n \n if decode is 'viterbi':\n return self.viterbi_decode(x)",
"def predict(self, df):\n results = [] \n _ds = pdfds.DataFrameDataset(df, self.fields) \n _iter = BucketIterator(_ds, batch_size=16, sort_key=lambda x: len(x.text),\n train=False, sort=True, sort_within_batch=True)\n self.odel.eval()\n with torch.no_grad():\n for (labels, text), _ in _iter:\n labels = labels.type(torch.LongTensor)\n text = text.type(torch.LongTensor)\n _, output = self.model(text, labels)\n sm = torch.nn.Softmax(dim=1)\n results.extend( sm(output).tolist()[1] )\n return results",
"def predict_sparse(train_xml_dir):\n\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n test_xml_dir = os.path.join(base, cfg.get('data', 'test_xml_dir'))\n test_cui_dir = os.path.join(base, cfg.get('data', 'test_cui_dir'))\n\n for f in os.listdir(test_cui_dir):\n\n file_path = os.path.join(test_cui_dir, f)\n file_as_string = open(file_path).read()\n\n category2label = {} # key: selection criterion, value: prediction\n for category in n2b2.get_category_names(train_xml_dir):\n\n # could not train model; always predict 'not met'\n if category == 'KETO-1YR':\n category2label[category] = 'not met'\n print('file: %s, crit: %s, label: %s' % (f, category, 'not met'))\n continue\n\n vectorizer_pickle = 'Model/%s.vec' % category\n vectorizer = pickle.load(open(vectorizer_pickle, 'rb'))\n x = vectorizer.transform([file_as_string])\n\n classifier_pickle = 'Model/%s.clf' % category\n classifier = pickle.load(open(classifier_pickle, 'rb'))\n prediction = classifier.predict(x)\n label = dataset.INT2LABEL[prediction[0]]\n\n category2label[category] = label\n print('file: %s, crit: %s, label: %s' % (f, category, label))\n\n xml_file_name = f.split('.')[0] + '.xml'\n xml_file_path = os.path.join(test_xml_dir, xml_file_name)\n n2b2.write_category_labels(xml_file_path, category2label)\n print()",
"def get_train_labels(self):\n raise NotImplementedError",
"def predict_proba(self):\n ...",
"def fit_predict(self):\n self.classifier = self.model\n self.classifier.fit(self.X_sample, self.y_sample)\n self.y_pred = self.classifier.predict(self.X_test)",
"def predict(model, images):\n return model.predict_classes(images)",
"def predict(model):\n # load test dataset\n test = dict(json.load(open('util_files/test.json')))\n ids = test['ids']\n data = test['data']\n\n df = pd.read_csv('data/document_departments.csv')\n labels = dict(df.values.tolist())\n\n id2cls = dict(json.load(open('util_files/id2cls.json')))\n \n ytrue = []\n ypredicted = []\n \n for i in range(len(data)):\n \n prediction = np.argmax(model.predict_on_batch(np.expand_dims(data[i], axis=0)))\n \n ypredicted.append(id2cls[str(prediction)])\n \n cls = labels[int(ids[i])]\n ytrue.append(cls)\n \n print \"classification report\"\n print classification_report(y_true=ytrue,\n y_pred=ypredicted)\n \n print \"*********************\"\n print \"Accuracy on test set\"\n print accuracy_score(y_true=ytrue,\n y_pred=ypredicted)\n print \"*********************\"",
"def predict(self):\n self.canv.update()\n ps = self.canv.postscript(colormode='mono')\n img = Image.open(io.BytesIO(ps.encode('utf-8')))\n img.save('result.png')\n x = Predict.transform_image(self)\n \n #prediction with multivariate regression\n Y_hat_test = self.multivariate_model.predict([x])\n C_multivariate = map(np.argmax, Y_hat_test) # classification vector\n C_multivariate = list(C_multivariate)\n multivariate_predict = C_multivariate[0]\n\n \n #prediction with Linear Discriminant Analysis (LDA)\n lda_predict = self.lda_model.predict([x])[0]\n qda_predict = self.qda_model.predict([x])[0]\n log_predict = self.log_model.predict([x])[0]\n \n baseline_label = Label(self, text='Baseline: ' + str(multivariate_predict) )\n baseline_label.grid(row=0, column=1, padx=5, pady=5)\n lda_label = Label(self, text=' LDA: '+ str(lda_predict))\n lda_label.grid(row=0, column=2, padx=5, pady=5)\n qda_label = Label(self, text='QDA: '+ str(qda_predict))\n qda_label.grid(row=1, column=1, padx=5, pady=5)\n log_label = Label(self, text=' Logistic: '+str(log_predict))\n log_label.grid(row=1, column=2, padx=5, pady=5)",
"def test_intent_classifier_get_labels(self):\n pass"
] |
[
"0.7386694",
"0.71547484",
"0.70753515",
"0.6984876",
"0.6951311",
"0.6930104",
"0.69105846",
"0.68986803",
"0.6878426",
"0.6850824",
"0.6850824",
"0.6849669",
"0.6848837",
"0.684592",
"0.684592",
"0.682805",
"0.6805592",
"0.67691106",
"0.6756331",
"0.67532843",
"0.67532843",
"0.67532843",
"0.6691279",
"0.6677265",
"0.6656309",
"0.66450447",
"0.66449463",
"0.6641344",
"0.6635007",
"0.6633174",
"0.66262203",
"0.66257405",
"0.6620934",
"0.6613161",
"0.66081446",
"0.66041934",
"0.6573761",
"0.65727824",
"0.6570542",
"0.6570498",
"0.655973",
"0.65594786",
"0.65594786",
"0.65594786",
"0.65570015",
"0.65533847",
"0.6551079",
"0.6550744",
"0.654907",
"0.65367466",
"0.65352887",
"0.65352887",
"0.65352887",
"0.6531807",
"0.651425",
"0.651358",
"0.6509242",
"0.6509018",
"0.65011585",
"0.6493365",
"0.64912486",
"0.64847577",
"0.6482435",
"0.6479967",
"0.647828",
"0.6473929",
"0.6472108",
"0.6470946",
"0.6465842",
"0.6449308",
"0.6445628",
"0.6444571",
"0.64425635",
"0.64397216",
"0.64273095",
"0.64267737",
"0.6425407",
"0.64169765",
"0.64133584",
"0.64091134",
"0.6407678",
"0.6406667",
"0.6403767",
"0.63963354",
"0.6389814",
"0.63808095",
"0.63799775",
"0.63781047",
"0.6371689",
"0.63679314",
"0.6364728",
"0.6364576",
"0.636432",
"0.6362331",
"0.6344946",
"0.6341459",
"0.63337183",
"0.6333554",
"0.633015",
"0.63237214",
"0.6321038"
] |
0.0
|
-1
|
Scores are computed on the test set
|
def show_score(clf, X_test, y_test):
y_pred = predict(clf, X_test)
    print(metrics.classification_report(y_test.astype(int), y_pred))
|
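A minimal, self-contained sketch of the pattern the document cell above illustrates: fitting a classifier and reporting scores on the held-out test set with scikit-learn's classification_report. The dataset, split, and LogisticRegression classifier are illustrative assumptions, not part of the row.

# Sketch only; the data and classifier below are assumptions for illustration.
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
y_pred = clf.predict(X_test)

# Scores are computed on the held-out test set only.
print(classification_report(y_test, y_pred))
print("accuracy:", accuracy_score(y_test, y_pred))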
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def score(self, test_data):\n\n\t\tpass",
"def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\t\treturn self.model.score(ins, outs)",
"def scoring(self):\n pass",
"def test_score():\n print(\"Tests for 'score' function\")\n test_suite = TestSuite()\n\n # Testing with empty hand\n result = score([])\n test_suite.run_test(result, 0, '0')\n # Testing with non-empty hand\n result = score([1, 3])\n test_suite.run_test(result, 3, '1')\n # Testing with non-empty hand\n result = score([1, 3, 1, 1])\n test_suite.run_test(result, 3, '2')\n # Testing with non-empty hand\n result = score([4, 3, 4, 3, 3])\n test_suite.run_test(result, 9, '3')\n\n # Show report\n test_suite.report_results()",
"def test_get_score():\n\n assert sequence_threshold.get_score([]) == 0\n assert sequence_threshold.get_score(SortedSet()) == 0\n assert sequence_threshold.get_score(list(range(3, 36))) == 3\n assert sequence_threshold.get_score([10, 11, 12, 14, 16, 17]) == 10 + 14 + 16",
"def test_scores(self) -> np.ndarray:\n return np.asarray(self.test_metric_dict[self.metric_name])",
"def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 15.0)\r\n self.assertEqual(score_dict['total'], 5.0)",
"def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)",
"def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)",
"def score(self):",
"def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass",
"def test_get_score(self):\r\n score_dict = self.combinedoe.get_score()\r\n self.assertEqual(score_dict['score'], 0)\r\n self.assertEqual(score_dict['total'], 1)",
"def get_score(self, a, b):\n ### FILL IN ###",
"def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass",
"def get_scores(self):\n return self.score",
"def test_compute_unnormalized_scores(self):\n # todo: implement this test!\n pass",
"def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)",
"def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores",
"def score(self, X_test: List[str], y_test: List[str]) -> int:\n predictions_count = 0\n right_predictions_count = 0\n\n for i in range(len(X_test)):\n label = self.predict(X_test[i].split())\n predictions_count += 1\n right_predictions_count += 1 if label == y_test[i] else 0\n\n return right_predictions_count / predictions_count",
"def get_score(self, student_answers):\r\n pass",
"def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score",
"def _compute_normalised_scores(self):\n\n results = self.snapshot['results']\n assg = AssignmentConfig().get_assignment()\n\n if results:\n self.snapshot['best_average_bugs_detected'] = \\\n max([results[submitter]['average_bugs_detected'] for submitter in results])\n self.snapshot['best_average_tests_evaded'] = \\\n max([results[submitter]['average_tests_evaded'] for submitter in results])\n\n for submitter in results.keys():\n submitter_bugs_detected = float(results[submitter]['average_bugs_detected'])\n submitter_tests_escaped = float(results[submitter]['average_tests_evaded'])\n\n results[submitter]['normalised_test_score'] = assg.compute_normalised_test_score(\n submitter_bugs_detected, self.snapshot['best_average_bugs_detected'],\n self.snapshot['results'][submitter]['average_tests_per_suite']\n )\n\n results[submitter]['normalised_prog_score'] = assg.compute_normalised_prog_score(\n submitter_tests_escaped, self.snapshot['best_average_tests_evaded']\n )\n\n # The current scoring algo for tests doesn't give the best test suite a maximums score.\n # re-normalise to make this happen\n best_test_score = max([results[submitter]['normalised_test_score'] for submitter in results.keys()])\n if best_test_score == 0:\n best_test_score = 1\n for submitter in results.keys():\n new_score = round(results[submitter]['normalised_test_score'] * (2.5 / best_test_score), 2)\n results[submitter]['normalised_test_score'] = new_score",
"def get_scores(self):\n return SklearnModel.evaluate_no_ground_truth_classifier_metrics(self.X_test, self.predictions)",
"def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break",
"def test_scoring(self):\n scores = score_words(['foo', 'far', 'has', 'car'])\n expected = [(7, 'far'), (6, 'car'), (5, 'has'), (4 , 'foo')]\n self.assertEqual(scores, expected)",
"def score(self):\n raise NotImplementedError()",
"def get_score(self):\n for response in self.response_list:\n self.score += response.get_score",
"def score_method(pairs_true, pairs_test):\n \n set_true = {tuple(e) for e in pairs_true}\n set_test = {tuple(e) for e in pairs_test}\n true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)\n \n total = true_pos + false_pos + false_neg\n true_pos_rate = true_pos / total\n false_pos_rate = false_pos / total\n false_neg_rate = false_neg / total\n \n return true_pos_rate, false_pos_rate, false_neg_rate",
"def calculate_scores(self):\n # Prediction based scores\n #self.report = classification_report(self.y_test, self.y_pred)\n self.accuracy = accuracy_score(self.y_real, self.y_pred)\n self.precision = precision_score(self.y_real, self.y_pred)\n self.recall = recall_score(self.y_real, self.y_pred)\n self.f1 = f1_score(self.y_real, self.y_pred)\n \n # Probability based scores\n self.fpr, self.tpr, _ = roc_curve(self.y_real, self.y_proba)\n self.average_precision = average_precision_score(self.y_real, self.y_proba)\n self.brier_loss = brier_score_loss(self.y_real, self.y_proba)\n self.roc_auc = roc_auc_score(self.y_real, self.y_proba)\n self.prec_cur, self.recall_cur, _ = precision_recall_curve(self.y_real, self.y_proba)",
"def scores_(self):\n return self.predictor.scores_",
"def test_create_total_scaled_score(self):\n score_test = score.ScoresGenerator()\n expected = 0\n score_test.create_total_scaled_score()\n self.assertEqual(score_test.SCALED_SCORES[KEY_TOTAL], expected)",
"def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100",
"def score(self, X, y):\n ...",
"def _get_scores(target, predicted):\n recall = scoring(target, predicted, metric=\"recall\")\n precision = scoring(target, predicted, metric=\"precision\")\n accuracy = scoring(target, predicted, metric=\"accuracy\")\n f_score = scoring(target, predicted, metric=\"f1\")\n\n return [recall, precision, accuracy, f_score]",
"def update_score():\n pass",
"def score(self, X, y=...):\n ...",
"def test_update_score_multiple(self):\r\n self.update_score_multiple()\r\n score = self.openendedmodule.latest_score()\r\n self.assertEquals(score, 1)",
"def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)",
"def score(name):\r\n return (sorted(test).index(name)+1)*value(name)",
"def score(self, X, y, predict_results=None, style=\"accuracy\"):\n results = predict_results\n if results is None:\n results = np.reshape(self.predict(X)[0], np.shape(y))\n if style=='accuracy':\n correct = 0\n for scored, expected in zip(results, y):\n if scored == expected:\n correct += 1\n return 0 if len(results) == 0 else (correct / len(results)) * 100.0\n if style=='mse':\n summer = 0\n count = 0\n for scored, expected in zip(results, y):\n summer = summer + ((scored - expected) ** 2)\n count = count + 1\n return summer / count",
"def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score",
"def score_samples(self, X):\n ...",
"def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total",
"def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total",
"def test_SetPlayerPuzzleScores_multiple(self):\r\n orig_score = 0.07\r\n puzzle_id = '1'\r\n response = self.make_puzzle_score_request([puzzle_id], [orig_score])\r\n\r\n # There should now be a score in the db.\r\n top_10 = Score.get_tops_n(10, puzzle_id)\r\n self.assertEqual(len(top_10), 1)\r\n self.assertEqual(top_10[0]['score'], Score.display_score(orig_score))\r\n\r\n # Reporting a better score should overwrite\r\n better_score = 0.06\r\n response = self.make_puzzle_score_request([1], [better_score])\r\n\r\n top_10 = Score.get_tops_n(10, puzzle_id)\r\n self.assertEqual(len(top_10), 1)\r\n\r\n # Floats always get in the way, so do almostequal\r\n self.assertAlmostEqual(\r\n top_10[0]['score'],\r\n Score.display_score(better_score),\r\n delta=0.5\r\n )\r\n\r\n # reporting a worse score shouldn't\r\n worse_score = 0.065\r\n response = self.make_puzzle_score_request([1], [worse_score])\r\n\r\n top_10 = Score.get_tops_n(10, puzzle_id)\r\n self.assertEqual(len(top_10), 1)\r\n # should still be the better score\r\n self.assertAlmostEqual(\r\n top_10[0]['score'],\r\n Score.display_score(better_score),\r\n delta=0.5\r\n )",
"def test_scores(self):\n pig = game.pig.Pig('PlayerA', 'PlayerB', 'PlayerC')\n self.assertEqual(\n pig.get_score(),\n {\n 'PlayerA': 0,\n 'PlayerB': 0,\n 'PlayerC': 0\n }\n )",
"def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)",
"def __calculateNormalizedScores(self):\n year_scores = {0 : []}\n for venue in self.venue_scores:\n v_scores = []\n for year in self.venue_scores[venue]:\n v_scores.append(self.venue_scores[venue][year])\n if year not in year_scores:\n year_scores[year] = []\n year_scores[year].append(self.venue_scores[venue][year])\n x_year = np.average(np.array(v_scores))\n self.venue_scores[venue][0] = x_year\n year_scores[0].append(x_year)\n \n ##for standardization\n #year_metrics = {x : (np.average(np.array(year_scores[x])), np.std(np.array(year_scores[x]))) for x in year_scores}\n ##for normalization\n year_metrics = {x: (max(year_scores[x]), min(year_scores[x])) for x in year_scores}\n \n #print year_metrics\n \n for venue in self.venue_scores:\n self.normalized_scores[venue] = dict()\n for year in self.venue_scores[venue]:\n #self.standard_scores[venue][year] = round((self.venue_scores[venue][year] - year_metrics[year][0]) / year_metrics[year][1],5)\n #self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1]) / (year_metrics[year][0] - year_metrics[year][1]) + eps\n self.normalized_scores[venue][year] = (self.venue_scores[venue][year] - year_metrics[year][1] + self.epsilon) / (year_metrics[year][0] - year_metrics[year][1] + self.epsilon)",
"def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)",
"def score(self,*val):\n if len(val):\n self._score = val[0]\n self.evaluated = 1\n else: self.evaluate()\n return self._score",
"def score(self,ytest,how='score'):\n scores = []\n #iterate through each pred for each nn value\n for pred in self.ypred:\n sc = np.empty(pred.shape[1]) #need to store the scores\n\n for i in range(pred.shape[1]):\n\n p = pred[:,i]\n\n if how == 'score':\n sc[i] = utilities.score(p, ytest[:,i])\n\n if how == 'corrcoef':\n\n sc[i] = utilities.corrcoef(p, ytest[:,i])\n\n scores.append(sc)\n\n scores = np.vstack(scores)\n return scores",
"def score(self, y_true, y_pred):\r\n pass",
"def _score(self, x, seq):\n pass",
"def test_get_score(self):\r\n\r\n score_dict = self.get_score(True, 3, 3)\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(score_dict[\"score\"], 1.0)\r\n\r\n # Testing score after data is stored in student_data_for_location in xmodule.\r\n _score_dict = self.peer_grading.get_score()\r\n\r\n # Score should be 1.0.\r\n self.assertEqual(_score_dict[\"score\"], 1.0)",
"def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts",
"def compute_scores(self, *scorers):\n if self.nodes[0]:\n list_ = self.nodes\n else:\n list_ = self.reaction_trees\n\n for idx, item in enumerate(list_):\n scores = {repr(scorer): scorer(item) for scorer in scorers}\n self.all_scores[idx].update(scores)\n self._update_route_dict(self.all_scores, \"all_score\")",
"def muc_scores(self):\n A_card, B_card = self.shape\n V_card = len(self)\n N = self.grand_total\n\n recall = _div(N - V_card, N - A_card)\n precision = _div(N - V_card, N - B_card)\n fscore = hmean(recall, precision)\n return precision, recall, fscore",
"def getScores(self,query):\n pass",
"def f1_score(model_id, test_set_id, rubric_id):\n result = {'true_positive': 0, 'false_positive': 0, 'true_negative': 0, 'false_negative': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_result(model_id, test_set_id, rubric_id)\n\n for key in rubrication_result:\n if rubrication_result[key] == answers[key]:\n if rubrication_result[key] == 1:\n result['true_positive'] += 1\n else:\n result['true_negative'] += 1\n else:\n if rubrication_result[key] == 1:\n result['false_positive'] += 1\n else:\n result['false_negative'] += 1\n if (result['true_positive'] + result['false_positive']) > 0:\n result['precision'] = result['true_positive'] / (result['true_positive'] + result['false_positive'])\n else:\n result['precision'] = 0\n if (result['true_positive'] + result['false_negative']) > 0:\n result['recall'] = result['true_positive'] / (result['true_positive'] + result['false_negative'])\n else:\n result['recall'] = 0\n if (result['precision'] + result['recall']) > 0:\n result['f1'] = 2 * result['precision'] * result['recall'] / (result['precision'] + result['recall'])\n else:\n result['f1'] = 0\n return result",
"def test_score_groups(test_input, expected):\n score = sp.score_groups(test_input)\n assert score == expected",
"def get_target_per_score(self):\n pass",
"def zscore(vals):",
"def get_scores(self):\n precision = self.right / self.count\n APs = self.right_labels / self.count\n mAP = np.mean(APs)\n distance = self.distance / self.count\n\n return {'precision': precision,\n 'APs': APs,\n 'mAP': mAP,\n 'distance': distance\n }",
"def _score(self, ModifiedWeights):\r\n \r\n UnflattenedWeights = self._UnflattenWeights(WeightsStrucure = self.WeightsStrucure, ModifiedWeights = ModifiedWeights)\r\n self.KerasModels.set_weights(UnflattenedWeights)\r\n test_on_batch = self.KerasModels.test_on_batch(X_train, y_train, sample_weight=None) # return ['loss', 'acc']\r\n return test_on_batch[1]",
"def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores",
"def _testScoreGeneric(testcase, sigma=0.2, num_repl=3):\n # Fit to training data \n testcase.mclf.fit(testcase.dfs_train, testcase.ser)\n # Score on training data\n result1 = testcase.mclf.score(testcase.dfs_train[0], testcase.ser)\n # Score on test data\n result2 = testcase.mclf.score(testcase.df_test, testcase.ser)\n #\n testcase.assertGreater(result1.abs, result2.abs)",
"def evaluate(self, training_scores, original_test_scores, imitation_test_scores):\n\n #finding a threshold: third to smallest training score\n sorted_scores = np.sort(training_scores)\n threshold = sorted_scores[2]\n\n #computing the number of errors\n errors = len(np.where(original_test_scores < threshold)[0])\n errors += len(np.where(imitation_test_scores > threshold)[0])\n\n #computing the local accuracy\n accuracy = 1 - errors/(len(original_test_scores)+len(imitation_test_scores))\n return accuracy, threshold",
"def _compute_scores(y_pred, y_true):\n auc = accuracy_score(y_true = y_true, y_pred = y_pred)\n pre = precision_score(y_true, y_pred, average = \"macro\")\n rec = recall_score(y_true, y_pred, average = \"macro\")\n f1 = f1_score(y_true, y_pred, average = \"macro\")\n\n return pd.Series(data = [auc, pre, rec, f1], index = ['acc', 'pre', 'rec', 'f1'])",
"def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )",
"def get_scores(self):\n hist = self.confusion_matrix\n # hist = [TN,FP;FN,TP]\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n # iou = iu.sum() / self.n_classes\n mean_iou = np.nanmean(iu) # if classes = 2: iou = miou\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n cls_iou = dict(zip(range(self.n_classes), iu))\n\n ##############################################\n tn = hist[0, 0]\n tp = np.diag(hist).sum() - tn\n fp = np.triu(hist, 1).sum()\n fn = np.tril(hist, -1).sum()\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n f1 = 2 * precision * recall / (precision + recall)\n\n # for medical img, img_seg \\in [0,1]\n dice = 2 * tp / (tp + tp + fn + fp)\n # dice = f1-score\n dsc = 2 * tp / (tp + fn + fp)\n # dsc = jaccard\n # voe = 2 * abs(fp + fn) / (tp + tp + fn + fp)\n # voe = 1 - dsc\n\n k2 = {\n # \"Overall Acc: \\t\": acc,\n 'Mean Acc': float(judge_nan(acc_cls)),\n # \"FreqW Acc : \\t\": fwavacc,\n 'Mean IoU': float(judge_nan(mean_iou)),\n 'F1-score': float(judge_nan(f1)),\n 'DSC': float(judge_nan(dsc)),\n 'Precision': float(judge_nan(precision)),\n 'Recall': float(judge_nan(recall)),\n }\n\n return k2",
"def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass",
"def __score_t(self, *args, **kwargs):\n pass",
"def score(self, X, y):\n raise NotImplementedError('Abstract method \"score\" must be '\n 'specialised!')",
"def disp_score():",
"def test_boxscore_scores(self):\n test_hteam_totals = self.BS.hTeam_totals['points']\n answer_hteam_totals = '140'\n test_vteam_totals = self.BS.vTeam_totals['points']\n answer_vteam_totals = '111'\n\n self.assertEqual(test_hteam_totals, answer_hteam_totals)\n self.assertEqual(test_vteam_totals, answer_vteam_totals)",
"def update_scores(self, score):\n self.result_list.append(score)\n\n if self.best_score == 0 and self.worst_score == 0:\n self.best_score = score\n self.worst_score = score\n\n if score < self.best_score:\n self.best_score = score\n\n if score > self.worst_score:\n self.worst_score = score",
"def getScore(data):\n return score",
"def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)",
"def _score_to_decision(self, score):",
"def f1_score(self):",
"def get_polyscore(self,X_test=None,y_test=None,metric='adjusted_r2'):\n X = self.get_points()\n y_pred = self.get_polyfit(X)\n train_score = score(self._model_evaluations, y_pred,metric, X=X)\n if X_test is not None and y_test is not None:\n y_pred_test = self.get_polyfit(X_test)\n test_score = score(y_test,y_pred_test,metric,X=X_test)\n return train_score, test_score\n else:\n return train_score",
"def _test_scores(lines):\n y_true, y_pred = zip(*[l.split()[-2:] for l in lines if len(l) > 0])\n res = report(score(y_true, y_pred))\n assert res.strip() == gold(lines).decode().strip()",
"def probabilities_score(model_id, test_set_id, rubric_id):\n result = {'true_average_probability': 0, 'false_average_probability': 0}\n # right answers\n answers = db.get_rubric_answers(test_set_id, rubric_id)\n # rubrication results\n rubrication_result = db.get_rubrication_probability(model_id, test_set_id, rubric_id)\n\n true_number = 0\n true_probability = 0\n false_number = 0\n false_probability = 0\n\n for key in rubrication_result:\n if answers[key]:\n true_number += 1\n true_probability += rubrication_result[key]\n else:\n false_number +=1\n false_probability += rubrication_result[key]\n\n if true_number:\n result['true_average_probability'] = true_probability / true_number\n\n if false_number:\n result['false_average_probability'] = false_probability / false_number\n\n return result",
"def computeFScores(self, targetLabels, actualLabels):\r\n if self.prMeasures is None:\r\n self.prMeasures = self.computePRMeasures(targetLabels, actualLabels)\r\n if self.prMeasures[0] == 0:\r\n return 0\r\n self.f1score = 2 * self.prMeasures[0] * self.prMeasures[1] / (0.0 + self.prMeasures[0] + self.prMeasures[1])\r\n return self.f1score",
"def get_overall_score(self, user):\n\n quizzes = ['iq', 'math', 'english']\n\n prev_scores = []\n new_scores = []\n\n for quiz in quizzes:\n quiz_obj = self.get_object(quiz)\n queryset = self.get_queryset(user, quiz_obj)\n\n try:\n new_scores.append(queryset[0].marks)\n prev_scores.append(queryset[1].marks)\n except:\n new_scores.append(queryset[0].marks)\n prev_scores.append(0)\n\n import statistics\n\n return statistics.mean(prev_scores), statistics.mean(new_scores)",
"def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores",
"def scores(self) -> List[float]:\n if not self.prediction:\n return []\n return [sentence.score for sentence in self.prediction.sentences]",
"def get_score(self):\n return tuple(self.score)",
"def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc",
"def take_test(exam, student):\n\n student.score = exam.administer()\n return student.score",
"def score_of_nodes(self, score):\n for hypervisor_id in self.model.get_all_hypervisors():\n hypervisor = self.model. \\\n get_hypervisor_from_id(hypervisor_id)\n count = self.model.get_mapping(). \\\n get_node_vms_from_id(hypervisor_id)\n if len(count) > 0:\n result = self.calculate_score_node(hypervisor)\n else:\n # The hypervisor has not VMs\n result = 0\n if len(count) > 0:\n score.append((hypervisor_id, result))\n return score",
"def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score",
"def score(self):\n self.set_idx()\n if self.idx:\n diffs = self.diffs()\n weights = self.weights\n return np.sum(weights * diffs) / np.sum(weights)\n else:\n return 0.0",
"def evaluate(self):\n scores = []\n scores.append(self.word_analogy())\n print(\"Word Analogy (acc): \", scores[0])\n scores.append(self.word_similarity())\n print(\"Word Similarity (MSE): \", scores[1])\n scores.append(self.concept_categorization())\n print(\"Concept Categorization (purity): \", scores[2])\n scores.append(self.sentiment_analysis())\n print(\"Sentiment Analysis (acc): \", scores[3])\n return scores",
"def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]",
"def test_get_all_scaled_scores_success(self):\n with mock.patch('score.ScoresGenerator.split_data') as mock_split_data:\n with mock.patch('score.ScoresGenerator.create_category_scaled_score') \\\n as mock_scaled_category:\n with mock.patch('score.ScoresGenerator.create_total_scaled_score') \\\n as mock_scaled_total:\n for test in self.success_get_all_scaled_score_test_params:\n score_test = score.ScoresGenerator()\n score_test.get_all_scaled_scores(test[KEY_INPUT])\n self.assertDictEqual(score_test.SCALED_SCORES, test[KEY_EXPECTED])",
"def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)",
"def run_tests(self) -> float:\n print(f\"Grading {self._test_case.__name__}\", file=sys.stderr)\n print(file=sys.stderr)\n for test, weight in self.test_weights.items():\n print(f\"Running {test.__name__}\", file=sys.stderr)\n if test.__doc__:\n print(test.__doc__, file=sys.stderr)\n result = self.run(test)\n if result.wasSuccessful():\n print(f\"Points: {weight}/{weight}\", file=sys.stderr)\n else:\n print(file=sys.stderr)\n print(\n \"Test failed with the error below, displayed between lines of ---.\",\n file=sys.stderr,\n )\n print(\n \"The expected value is given first, followed by the actual result.\",\n file=sys.stderr,\n )\n print(\"-\" * 70, file=sys.stderr)\n # Get the error/failure\n try:\n print(result.errors[0][1], file=sys.stderr)\n except IndexError:\n pass\n try:\n print(result.failures[0][1], file=sys.stderr)\n except IndexError:\n pass\n print(\"-\" * 70, file=sys.stderr)\n print(f\"Points: {0.0}/{weight}\", file=sys.stderr)\n print(file=sys.stderr)\n print(\"=\" * 70, file=sys.stderr)\n return self.grade",
"def set_scores(apps, schema_editor):\n\n Game = apps.get_model(\"stats\", \"Game\")\n for game in Game.objects.all():\n score_allies = 0\n score_opponents = 0\n player_stats = game.playerstat_set.all()\n for stat in player_stats:\n if stat.is_opponent:\n score_opponents += stat.scored\n else:\n score_allies += stat.scored\n\n game.score_allies = score_allies\n game.score_opponents = score_opponents\n game.save()",
"def compute_scores(self):\n if self.num_classes == 2:\n score_1 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold=0.5,\n )[1]\n\n score_2 = self.competition_metric(\n helmet_threshold=0.5,\n impact_threshold_ratio=0.5,\n )[1]\n\n score_3 = self.competition_metric(\n impact_threshold=0.5,\n )[1]\n else:\n score_1 = self.detection_metric(threshold=0.1)\n score_2 = self.detection_metric(threshold=0.25)\n score_3 = self.detection_metric(threshold=0.5)\n\n return score_1, score_2, score_3",
"def compute_score(self):\n for i in xrange(FRAMES):\n # STRIKE\n if self.frames[i][0] == 10:\n # CONSECUTIVE STRIKE\n if self.frames[i + 1][0] == 10:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 2][0])\n else:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 1][1])\n # SPARE\n elif (self.frames[i][0] + self.frames[i][1] == 10):\n self.scores.append(self.frames[i][0] + self.frames[i][1] +\n self.frames[i + 1][0])\n # NEITHER\n else:\n self.scores.append(self.frames[i][0] + self.frames[i][1])\n # Total Score\n for score in self.scores:\n self.score += score"
] |
[
"0.7901326",
"0.7223528",
"0.71334326",
"0.701038",
"0.6967124",
"0.6966751",
"0.6931556",
"0.6913712",
"0.6913712",
"0.6911562",
"0.6869426",
"0.68653387",
"0.6790049",
"0.6784269",
"0.67793673",
"0.6718337",
"0.66967857",
"0.66949916",
"0.666959",
"0.66609585",
"0.66582024",
"0.665287",
"0.66459334",
"0.6643349",
"0.6641711",
"0.6633968",
"0.6631586",
"0.6552934",
"0.65299577",
"0.6526923",
"0.65221506",
"0.65100175",
"0.64897305",
"0.64832217",
"0.6459278",
"0.644981",
"0.6444351",
"0.644234",
"0.644104",
"0.64374316",
"0.64328",
"0.64318126",
"0.6423888",
"0.6423888",
"0.6407526",
"0.640689",
"0.64022565",
"0.6390295",
"0.63864934",
"0.63794327",
"0.63765866",
"0.63751656",
"0.63722634",
"0.6366903",
"0.6364913",
"0.634919",
"0.6348609",
"0.6347097",
"0.6342064",
"0.6336332",
"0.63227016",
"0.63192856",
"0.63165355",
"0.6313652",
"0.63051856",
"0.6300141",
"0.6297557",
"0.6290876",
"0.62820786",
"0.6265521",
"0.6263399",
"0.62603736",
"0.6258237",
"0.6257209",
"0.62561715",
"0.62533605",
"0.62398076",
"0.622764",
"0.6225297",
"0.6221362",
"0.62194175",
"0.6214197",
"0.6201256",
"0.6199032",
"0.6194608",
"0.6193266",
"0.6193106",
"0.61904085",
"0.618224",
"0.61776495",
"0.61720324",
"0.61699146",
"0.61686563",
"0.61607367",
"0.6160594",
"0.61574984",
"0.61569077",
"0.6150058",
"0.6148306",
"0.6147349",
"0.61388785"
] |
0.0
|
-1
|
When a user asks for a potential_category, rank the possible categories by relevance and return the top match
|
def find_match(potential_category: str, categories: List[str]):
return process.extractOne(potential_category, categories)[0]
|
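The document cell above returns only the single best fuzzy match via process.extractOne. Below is a short sketch of the same idea that also ranks the candidate categories by relevance before returning the top match; it assumes the thefuzz package (successor of fuzzywuzzy) supplies the process module, since the row does not show the import.

# Sketch assuming thefuzz (pip install thefuzz); the package choice is an assumption,
# as the original row does not show where `process` comes from.
from typing import List, Tuple

from thefuzz import process


def rank_categories(potential_category: str, categories: List[str], top_n: int = 3) -> List[Tuple[str, int]]:
    # All candidate categories ranked by fuzzy-match score, highest first.
    return process.extract(potential_category, categories, limit=top_n)


def find_match(potential_category: str, categories: List[str]) -> str:
    # The top-ranked category, mirroring the extractOne call in the document.
    return process.extractOne(potential_category, categories)[0]


# Example usage (hypothetical category list): find_match("electronic gadgets",
# ["electronics", "clothing", "home appliances"]) typically returns "electronics";
# rank_categories(...) returns the full (category, score) ranking.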
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def FoodRank(search_term):\n return _ranks[search_term.lower()]",
"def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )",
"def lookup_relevant(score):\n category = \"\"\n if score > 2.0:\n category = \"RELEVANT\"\n elif score > 0.0:\n category = \"PARTIALLY RELEVANT\"\n else:\n category = \"NOT RELEVANT\"\n return category",
"def _look_in_concordance(self, term, concordance):\n\n suggested = dict()\n words = [word.strip(',.:;*').lower() \\\n for word in str(self.tree.item(term)['values'][0]).split(' ')]\n# messagebox.showwarning(\"_look_in_concordance\",\"words={}\".format(words))\n for word in words:\n if word in concordance:\n for item in concordance[word]:\n if item in suggested:\n suggested[item] += 1\n else:\n suggested[item] = 1\n# if word == 'ad':\n# messagebox.showwarning(\"word 'ad' suggested?\",\"suggested={}\".format(suggested))\n# pass\n rank = sorted(suggested, key=suggested.get, reverse=True)\n for item in rank:\n if item not in self.tree.get_children(term):\n self.tree.insert(term,'end', \\\n values=[self.tree.item(item)['values'][0], \\\n self.tree.item(item)['values'][1]],\\\n text='possible', tags=('suggestions',))\n if len(rank) > 0 and self.tree.parent(term) != 'suggestions':\n for child in self.tree.get_children(term):\n self.tree.item(item, tags='suggestions')\n self.tree.item(term, tags='suggestions')\n self.tree.move(term, 'suggestions', 'end')",
"def search_categorie(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.categories, f.categories.id, f.categories.name)\n q.where().equal(f.categories.name, _input)\n categorie_data = j.executeQuery(q)\n\n if categorie_data: \n cat_id, cat_name = categorie_data[0]\n examples = _create_examples(j.list_word_by_categorie, cat_name)\n return SelectorResult('categorie', cat_id, cat_name, *examples)",
"def get_category_scores(category: Category):\r\n solutions = Solution.objects.filter(challenge__category=category).select_related(\"user\").select_related(\"challenge\")\r\n d = dict()\r\n\r\n for sol in solutions:\r\n d[sol.user] = d.get(sol.user, 0) + sol.get_score()\r\n \r\n return d",
"async def search_by_product_or_category(\n conn, cursor, product: str = \"\", category: str = \"\"\n) -> List[str]:\n\n if (not product) and (not category):\n filter_term = \"\"\n elif product and category:\n filter_term = (\n f\"\\n WHERE product = '{product}' AND category = '{category}'\"\n )\n elif product:\n filter_term = f\"\\n WHERE product = '{product}'\"\n else:\n filter_term = f\"\\n WHERE category = '{category}'\"\n\n statement = f\"\"\"\n SELECT product.name as product,\n product.description as description,\n product.category as category,\n supplier_product.price as price,\n supplier_product.supplier as supplier,\n supplier_product.price as price,\n product.rating as product_rating,\n supplier.rating as supplier_rating,\n ROUND(((product.rating + supplier.rating)/2),2) as combined_rating,\n product.last_updated as last_updated \n FROM product \n INNER JOIN supplier_product\n ON product.name = supplier_product.product\n INNER JOIN supplier \n ON supplier_product.supplier = supplier.name {filter_term}\n ORDER BY (product.rating + supplier.rating) DESC\n \"\"\"\n await cursor.execute(statement)\n categories = await cursor.fetchall()\n return categories",
"def category(self):\n\n for category, match_list in rule_list:\n for match in match_list:\n if match.match(self):\n return category\n\n return None",
"def calculate_rank(category, word_pairs): \n rand_vals = np.random.uniform(-1, 100, size=len(word_pairs)) #random numbers\n ranked = sorted(zip(rand_vals, word_pairs), key=lambda x:x[0], reverse=True) #sorting them\n formatted = []\n for e in ranked:\n prob = '%.1f'%e[0]\n formatted.append('{} \"{}:{}\"'.format(prob, e[1][0],e[1][1]))\n return formatted",
"def suggest_categories(self, category, max_results=10, blogid=1):\n return self.execute('wp.suggestCategories', blogid, self.username, self.password, category, max_results)",
"def search(query):\n r = requests.get(BASE_URL + str(query))\n page_body = r.text\n # Hand the page source to Beautiful Soup\n\n soup = BeautifulSoup(page_body, 'html.parser')\n \n product_item = soup.select('div.product-info-item')\n if(len(product_item)==0):\n product_item = soup.select('div.cat')\n #get the cateegory\n product_item = product_item[0]\n category = str(product_item.find_all('a'))\n category = category[category.find(\">\")+1:-5]\n \n url = str(soup.findAll('meta',property=\"og:url\"))\n url_splitted = url.split('/')\n print(url)\n #parent_category = url_splitted[4]\n if(len(url)>20):\n parent_category = url_splitted[4]\n else:\n parent_category = None\n\n \n return category, parent_category",
"def eval_category(self, u_eval, v_compares):\n # get cosinus sim with k-NN\n # cos_sim_results = [(tag, cos_sim) ... ] \n cos_sim_results = self.kNN(u_eval, v_compares)\n\n # found the most common tag\n c = Counter([tag for tag, _ in cos_sim_results])\n try:\n tag, number = c.most_common(1)[0]\n except IndexError: # No result (cos_sim_results is empty)\n logging.error(\"No results for %s %s\" % (u_eval, cos_sim_results))\n return\n\n # get the cosinus similarity average for the founded tag\n average = 0.0\n for _tag, _number in cos_sim_results:\n if _tag == tag:\n average += _number\n average /= number\n\n logging.debug(\"%s common tag %s (nb %s) (av %s)\" % \\\n (u_eval, tag, number, average))\n\n return tag, average",
"def display_by_category(request, category):\n if request.method == \"POST\":\n form = SearchForm(request.POST)\n if form.is_valid():\n products = AuctionListing.objects.filter(buyer=None).filter(\n category__iexact=category).filter(title__icontains=form.cleaned_data[\"query\"])\n else:\n products = AuctionListing.objects.filter(buyer=None).filter(\n category__iexact=category)\n else:\n products = AuctionListing.objects.filter(buyer=None).filter(\n category__iexact=category)\n form = None\n bids = []\n for product in products:\n bid = product.bids.all().aggregate(Max(\"bid\")).get(\"bid__max\")\n bids.append(bid)\n return render(request, \"auctions/index.html\", {\n \"zip_products_bids\": zip(products, bids),\n \"category\": category,\n \"form\": form,\n \"title\": \"Active Listing\",\n })",
"def keyword_classifier(utterance):\n categories = {\n 'hello': ['hi ', 'greetings', 'hello', 'what\\'s up', 'hey ', 'how are you?', 'good morning', 'good night',\n 'good evening', 'good day', 'howdy', 'hi-ya', 'hey ya'],\n 'bye': ['bye', 'cheerio', 'adios', 'sayonara', 'peace out', 'see ya', 'see you', 'c ya', 'c you', 'ciao'],\n 'ack': ['okay', 'whatever', 'ok ', 'o.k. ', 'kay ', 'fine '],\n 'confirm': ['is it', 'is that', 'make sure', 'confirm', 'double check', 'check again', 'does it'],\n 'deny': ['dont want', 'don\\'t want', 'wrong', 'dont like', 'don\\'t like'],\n 'inform': ['dont care', 'don\\'t care', 'whatever', 'bakery', 'bar', 'cafe', 'coffeeshop', 'pub', 'restaurants',\n 'roadhouse', 'african',\n 'american', 'arabian', 'asian', 'international', 'european', 'central american', 'middle eastern',\n 'world', 'vegan', 'vegetarian', 'free', 'kosher', 'traditional', 'fusion', 'modern', 'afghan',\n 'algerian', 'angolan', 'argentine',\n 'austrian', 'australian', 'bangladeshi', 'belarusian', 'belgian', 'bolivian', 'bosnian',\n 'herzegovinian', 'brazilian', 'british', 'bulgarian', 'cambodian',\n 'cameroonian', 'canadian', 'cantonese', 'catalan', 'caribbean', 'chadian', 'chinese', 'colombian',\n 'costa rican', 'czech', 'congolese', 'cuban', 'danish', 'ecuadorian', 'salvadoran', 'emirati',\n 'english', 'eritrean',\n 'estonian',\n 'ethiopian', 'finnish', 'french', 'german', 'ghanaian', 'greek', 'guatemalan', 'dutch', 'honduran',\n 'hungarian', 'icelandic',\n 'indian', 'indonesian', 'iranian', 'iraqi', 'irish', 'israeli', 'italian', 'ivorian', 'jamaican',\n 'japanese',\n 'jordanian', 'kazakh', 'kenyan', 'korean', 'lao', 'latvian', 'lebanese', 'libyan', 'lithuanian',\n 'malagasy', 'malaysian',\n 'mali', 'mauritanian', 'mediterranean', 'mexican', 'moroccan', 'namibian', 'new zealand',\n 'nicaraguan',\n 'nigerien', 'nigerian', 'norwegian', 'omani', 'pakistani', 'panamanian', 'paraguayan', 'peruvian',\n 'persian', 'philippine', 'polynesian', 'polish', 'portuguese', 'romanian', 'russian', 'scottish',\n 'senegalese', 'serbian',\n 'singaporean', 'slovak', 'somalian', 'spanish', 'sudanese', 'swedish', 'swiss', 'syrian', 'thai',\n 'tunisian', 'turkish',\n 'ukranian', 'uruguayan', 'vietnamese', 'welsh', 'zambian', 'zimbabwean', 'west', 'north', 'south',\n 'east', 'part of town', 'moderate', 'expensive', 'cheap', 'any ', 'priced', 'barbecue', 'burger',\n 'chicken',\n 'doughnut', 'fast food',\n 'fish and chips', 'hamburger', 'hot dog', 'ice cream', 'noodles', 'pasta', 'pancake', 'pizza',\n 'ramen', 'restaurant', 'seafood', 'steak',\n 'sandwich', 'sushi'],\n 'negate': ['no ', 'false', 'nope'],\n 'repeat': ['repeat', 'say again', 'what was that'],\n 'reqalts': ['how about', 'what about', 'anything else'],\n 'reqmore': ['more', 'additional information'],\n 'request': ['what', 'whats' 'what\\'s', 'why', 'where', 'when', 'how much', 'may', 'address', 'post code',\n 'location', 'phone number'],\n 'restart': ['reset', 'start over', 'restart'],\n 'thankyou': ['thank you', 'cheers', 'thanks'],\n 'affirm': ['ye ', 'yes', 'right ']\n }\n\n classification = []\n sentence_to_classify = utterance.lower()\n for category, keywords in categories.items():\n keywords_found = [keyword for keyword in keywords if keyword in sentence_to_classify]\n if len(keywords_found) > 0: classification.append(category)\n\n return classification if len(classification) > 0 else ['null']",
"def sort_by_ratings():\n\n print(\"***** Find Businesses by Categories Sorted by Rate *****\")\n while True:\n print()\n category = input(\n 'Please enter a type of business (category) or type \"back\" or \"quit\": ')\n print()\n if category == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if category == \"back\":\n return\n\n # create a regex pattern for business name\n pattern = r\".*\" + re.escape(category) + r\".*\"\n regx = re.compile(pattern, re.IGNORECASE)\n\n cursor = business_col.find({\"categories\": regx})\n\n business_objects = cursor.limit(10).sort(\"stars\", -1)\n\n if cursor.count() == 0:\n print(\"No businesses found with given category.\")\n continue\n for business_object in business_objects:\n print(f'Stars: {business_object[\"stars\"]}')\n print_business(business_object)",
"def search_substitute(product):\r\n cursor.execute('USE openfoodfacts;')\r\n # Make a string with the categories used in the query\r\n search = product.category\r\n # Other variable\r\n product_name = product.name\r\n product_score = product.nutri_score\r\n\r\n cursor.execute(\"\"\"SELECT Food.id, Food.name, categories_id, nutri_score, url, stores \\\r\n FROM Food \\\r\n INNER JOIN Categories ON Food.categories_id = Categories.name\\\r\n WHERE categories_id LIKE %s AND Food.name NOT LIKE %s \\\r\n AND Food.nutri_score <= %s \"\"\", (search, product_name, product_score))\r\n substitute = cursor.fetchone()\r\n try:\r\n return cl.Food(substitute)\r\n except TypeError:\r\n print(\"Désolé, il n'y a pas de substitut pour ce product...\")",
"def classify(self, doc, default=None):\n probs = {}\n \n # Find the category with the highest probability\n max = Decimal(0)\n for cat in self.categories():\n probs[cat] = self.prob(doc, cat)\n if probs[cat] > max: \n max = probs[cat]\n best = cat\n\n if max == 0.0:\n return default\n \n # Make sure the probability exceeds threshold*next best\n for cat in probs:\n if cat == best:\n continue\n if probs[cat]*Decimal(str(self.get_threshold(best)))>probs[best]:\n return default\n \n print probs[best]\n return best",
"def category_match(self,single_word, input_dataframe):\n temp_set = self.index_dict[single_word]\n category_list = list(temp_set)\n filtered_df = input_dataframe.loc[category_list]\n return filtered_df",
"def get_topk_terms(self, k, label, term_type='w'):\n counter = Counter()\n # Retrieve reviews with the input label\n label_reviews = self.df[self.df[self.truth_col] == label][self.review_col]\n for review in label_reviews:\n terms = self.tokenize(review, term_type)\n counter.update(terms)\n\n return counter.most_common(k)",
"def rank_results(result_list, search_title, search_artist, uploader_list):\n #scores = []\n #search_artist = search_artist.replace(\"+\", \" \").lower()\n search_title = search_title.replace(\"+\", \" \")\n #search_terms = search_title.split() + search_artist.split()\n\n ## Give score to each result\n #for index, title in enumerate(result_list):\n # title = title.lower()\n # score = 0\n\n # # One point for each word in result title\n # for term in search_terms:\n # if term in title:\n # score += 1\n\n # # 2 points if whole title in result, 2 points for whole artist, 4 points for both\n # if search_title in title:\n # score += 2\n # if search_artist in title:\n # score += 2\n # if search_title in title and search_artist in title:\n # score += 4\n # if search_title == title and (uploader_list[index] == search_artist+\" - topic\" or uploader_list[index] == 'various artists - topic' or uploader_list[index] == search_artist or uploader_list[index] == search_artist+'\\\\xa0'):\n # score += 100\n # if 'karaoke' in title:\n # score-=1000\n\n # scores.append(score)\n\n # return scores.index(max(scores))\n for index, title in enumerate(result_list):\n title = title\n if search_title == title:\n return index\n\n return 0",
"def get_closest(list_of_nearby, favorite_place):\n\tref_rating = float(favorite_place[\"rating\"]) # this is a float\n\tref_price_len = len(favorite_place[\"price\"]) # this is the length of the dollar sign - an int\n\tref_categ = favorite_place[\"categories\"] # this is a string!\n\n\tfor item in list_of_nearby:\n\t\tscore = 0\n\t\tlist_of_cat_words = item[categories].split()\n\t\tfor word in list_of_cat_words:\n\t\t\tif word in ref_categ:\n\t\t\t\tscore += 1\n\t\tscore = score * 5\n\t\tscore = score - 2 * abs(len(item[\"price\"]) - ref_price_len)\n\t\tscore = score - 10 * abs(float(item[\"rating\"]) - ref_rating)\n\t\titem[\"score\"] = score\n\n\tfor item in list_of_nearby:\n\t\treturn_list = []\n\t\treturn_list.append({\"id\": item[\"id\"], \"score\": item[\"score\"]})\n\n\treturn_list = sorted(return_list, key = lambda i: i[\"score\"])\n\treturn return_list",
"async def search(self, ctx: Context, category: str, *, query: str) -> None:\n if category not in config.basic_search_categories:\n await ctx.send(f\"Invalid Category! ```Available Categories : {', '.join(config.basic_search_categories)}```\")\n return\n await self._basic_search(ctx, query, category)",
"def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")",
"def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384",
"def CategoryScore(Category):\r\n \r\n Category = pd.read_excel('OutdoorScores.xlsx', Category , \r\n usecols=[0,1,2,3,4])\r\n ResultCategory = Category.sort_values(['Score','Golds','Hits'],\r\n ascending=[False,False,False],na_position='last')\r\n ResultCategory = ResultCategory.reset_index(drop=True)\r\n N=0\r\n for i in range(100):\r\n N += 1\r\n if pd.isnull(Category.loc[N,'Name']) == True: \r\n # looks at row N, column 'Name'\r\n break\r\n return ResultCategory[0:N] # if the cell is NaN, stops at row N\r",
"def searchPoses():\n\n # TODO: how to account for substrings too? postgres doesn't seem to do this...maybe algolia is better\n if request.args:\n keyword = request.args.get('keyword')\n\n difficulty = request.args.getlist('difficulty') # list of difficulty\n if not difficulty: # if the list is empty\n difficulty = ['Beginner', 'Intermediate', 'Expert']\n\n categories = request.args.getlist('categories') # list of categories\n if not categories:\n all_cat_ids = db.session.query(Category.cat_id).all() # returns a list of tuples of all the ids\n categories = [category[0] for category in all_cat_ids] # converts that to a list\n \n query = db.session.query(Pose).join(PoseCategory)\n query = search(query, keyword, sort=True) # sort the search results by ranking\n all_poses = query.filter(Pose.difficulty.in_(difficulty),PoseCategory.cat_id.in_(categories)).order_by(Pose.name).all()\n \n else:\n all_poses = Pose.query.order_by('name').all()\n\n # make a dictionary of all the counts for the category and difficulty\n # TODO: try doing a subquery in SQLAlchemy for better performance?\n # https://stackoverflow.com/questions/38878897/how-to-make-a-subquery-in-sqlalchemy\n difficulty_counts = {'Beginner':0, 'Intermediate': 0, 'Expert':0}\n category_counts = {}\n for pose in all_poses:\n difficulty_counts[pose.difficulty] += 1\n pose_categories = pose.pose_categories # a list of pose_categories\n for pose_cat in pose_categories:\n if pose_cat.cat_id not in category_counts:\n category_counts[pose_cat.cat_id] = 0\n category_counts[pose_cat.cat_id] +=1\n\n all_categories = Category.query.order_by('name').all()\n\n return render_template(\"search.html\", \n all_poses=all_poses, \n categories=all_categories,\n difficulty_counts=difficulty_counts,\n category_counts=category_counts)",
"def getBest(self, category):\n if category == 'Accuracy':\n index = np.argmax(self.trainAcc)\n elif category == 'Error':\n index = np.argmin(self.trainError)\n\n return self.trainError[index], self.trainAcc[index], self.w[index]",
"def search_by_term():\n body = request.get_json()\n term = body.get('searchTerm', '')\n current_category = None\n\n if term == '':\n abort(422)\n\n questions = Question.query.filter(Question.question.ilike('%'+term+'%')).all()\n formatted_questions = [question.format() for question in questions]\n\n if len(formatted_questions) > 0:\n current_category = formatted_questions[0]['category']\n\n return jsonify({\n 'success': True,\n 'questions': formatted_questions,\n 'total_questions': len(formatted_questions),\n 'current_category': current_category\n })",
"def suggest(ctx, request: str):\n replacer = Replacer(ctx.obj.get('GKG_API_KEY'))\n suggestion = replacer.suggest(request)\n if suggestion == request:\n logger.info(\n 'Result from Google Knowledge Graph equals input: \"{0}\"', request,\n )\n elif suggestion:\n logger.info('Result from Google Knowledge Graph: \"{0}\"', suggestion)\n else:\n logger.info(\n 'No results in the Google Knowledge Graph for: \"{0}\"', request,\n )",
"def topKSimilar(self,word,k = 5,maxDistance = 5):\n r = self.__search(word,self.root,maxDistance)\n return sorted(r.items(),key = lambda x:x[1])[0:k]",
"def search(category, keywords, maximum_price,\n minimum_price, sort, page):\n params = dict(\n genreId=category,\n keyword=keywords\n )\n if maximum_price is not None:\n params['maxPrice'] = int(float(maximum_price))\n if minimum_price is not None:\n params['minPrice'] = int(float(minimum_price))\n if sort is not None:\n params['sort'] = '+itemPrice' if sort == 'price' else '-itemPrice'\n\n # we threat 100 results as 1 page which equals to 4 pages\n # on current API search request\n page = page * 4\n\n # lets load 100 search results\n results = []\n # pylint: disable=bad-builtin\n for i in range(page, page + 4):\n result = RAKUTEN.item_search(hits=25, page=i+1, **params)\n result = map(extract_data, result.get('Items', []))\n results.append(result)\n responce = list(filter(IS_DATA_VALID, chain(*results)))\n return responce",
"def getChuckFactWithCategory(category):\n try:\n fact = requests.get(\"https://api.chucknorris.io/jokes/random?category=\" + category)\n return fact.json()['value']\n except requests.RequestException as e:\n print(e)\n raise e",
"def search(index_data, link_data, stop_word_list, search_string):\n\n topN = 5\n\n query_terms = sanitize(search_strings, stop_word_list)\n print(query_terms)\n\n # get all links which contain all the query terms\n links = get_links(query_terms)\n print(\"\\nURLs containing all of the query terms (%d):\" % len(links))\n for l in links:\n print(l)\n\n # rank the links using Vector model\n vector_ranked = rank_links(index_data, query_terms, links)\n #print(ranked_list)\n \n # build a graph of the links\n graph = build_graph(link_data, links)\n\n # rank the links using Vector model\n page_ranked = calculate_pagerank_with_teleport(graph, EPSILON, 10) \n \n # return the data\n return links, vector_ranked, page_ranked",
"def search2(term, location, distance):\n\n print location\n if float(distance) <= 5.0:\n radius = 0.75\n elif float(distance) > 5.0 and float(distance) <= 10.0:\n radius = 1.5\n elif float(distance) > 10.0 and float(distance) <= 25.0:\n radius = 4\n elif float(distance) > 25.0 and float(distance) <= 100.0:\n radius = 8\n elif float(distance) > 100.0:\n radius = 15\n\n metradius = int(float(1609) * float(radius))\n print metradius\n url_params = {\n 'category_filter': term.replace(' ', '+'),\n 'radius_filter': metradius,\n 'll': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, url_params=url_params)",
"def concept_categorization(self):\n dataset = pd.read_csv(\"data/Categorization data set.csv\", sep=\";\", header=None)\n dataset.columns = ['concept','word']\n\n cti = {}\n for i,c in enumerate(np.unique(dataset.concept.values)):\n cti[c] = i\n y_true = dataset.concept.apply(lambda x: cti[x]).values\n vs = []\n preds = [''] * dataset.shape[0]\n for ind,w in enumerate(dataset.word.values):\n try:\n vs.append(self.embeddings_index[w])\n except:\n preds[ind] = 0 \n km = KMeans(n_clusters=22, random_state=0)\n km.fit(np.array(vs).astype(np.float32))\n for ind,w in enumerate(dataset.word.values):\n if preds[ind] == '':\n preds[ind] = km.predict(np.array([self.embeddings_index[w]]))[0]\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, preds)\n #purity score\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)",
"def suggest_score(popularity, vote_average, scores):\n # Popularity is calculated by searches, favourites, votes, etc: https://developers.themoviedb.org/3/getting-started/popularity\n # Popularity can vary from 100 (a popular old movie) to 2000 (newly released hyped movie)\n popularity_score = math.atan(popularity)\n # Relevance score rewards movies with many similar movies (from your scores). Set to '1' to ignore relevance.\n relevance_score = math.atan(math.sqrt(len(scores)*10))\n # how vote average on TMDb should affect the score. This function uses a circular shape.\n vote_average_score = math.sqrt(1-(1-vote_average/10)**1)\n # how the scores of your similar ratings should be affecting suggestions\n similar_average_score = (sum(scores) / 10) / len(scores)\n\n personified_score = popularity_score * similar_average_score * relevance_score\n score = popularity_score * similar_average_score * relevance_score * vote_average_score\n return score, personified_score",
"def use_keyword_classifier():\n while True:\n utterance = input(\"\\n\\nEnter utterance you want to classify, \\ntype menu or exit to go back:\\n-> \").lower()\n if utterance == \"menu\" or utterance == \"exit\":\n break\n else:\n try:\n label_pred = keyword_classifier(utterance)\n print(\"Prediction: {0}\".format(*label_pred))\n except ValueError:\n print(\"Prediction: {0}\".format(\"null\"))",
"def search_by_category(query, page=1, pagesize=12, max_price=99999999, min_price=0):\n create_elastic_connection()\n\n es = Elasticsearch(['elasticsearch:9200'])\n s = Search(index=\"product-index\").using(es)\n \n print(query, file=sys.stderr)\n\n q = Q({\"multi_match\": {\n \"query\": query,\n \"fields\": [\"category\"],\n \"fuzziness\": \"AUTO\"\n }\n })\n\n s = s.query(q)[0:1000]\n\n f = Q({'range':{'price':{'gte':min_price,'lte':max_price}}})\n s = s.filter( f )\n\n print(s.execute().to_dict(), file=sys.stderr)\n\n results = []\n for hit in s:\n results.append(models.Product.objects.get(name=hit.name))\n\n print(results, file=sys.stderr)\n\n paginator = Paginator(results, pagesize)\n\n try:\n items = paginator.get_page(page)\n except PageNotAnInteger:\n items = paginator.get_page(1)\n except EmptyPage:\n items = paginator.get_page(paginator.num_pages)\n\n return items",
"def search(self, text: str, category: str = None):\n if category:\n # ensuring that it will be in lowercase\n category = category.lower()\n\n if not category or not category in self.search_categories:\n category = \"all\"\n\n search_url = f\"{SITE_URL}/{self.search_categories[category]}/?adb.search={text}\"\n\n # return answer.text\n return self.fetch_url(search_url)",
"def _request_category(self, category_str):\n return self._request(self._wikipedia, category_str, query_key='cmtitle')",
"def judge_katamari(self, katamari_contents: List[Item]) -> Tuple[float, List[Tuple[str, int]], List[str]]:\n size_total = 0\n cousins = []\n my_dict = {}\n\n for items in katamari_contents:\n # keep track of cousins\n if items.category == \"cousins\":\n cousins.append(items.name)\n # cousins do not contribute to size.\n if items.category != \"cousins\":\n size_total += self.score_book[items.name]\n\n # if in my dictionary, increment frequency value\n if my_dict.get(items.category) is not None:\n my_dict[items.category] += 1\n else:\n # if not in the dictionary, add it with frequency value of 1\n my_dict[items.category] = 1\n\n list_of_tuples = list(my_dict.items())\n # (x[1], x[0]) causes sort to check key value if values are equivalent.\n list_of_tuples.sort(key=lambda x: (x[1], x[0]), reverse=True)\n top_categories = list_of_tuples[:3]\n\n return round(size_total, 1), top_categories, cousins",
"def parse_search_results(data, category):\n paginate(data[\"pagination\"], category)\n for data in data[\"results\"]:\n add_menu_item(play_film,\n data.get(\"title\").title(),\n {\"href\": \"{}?fo=json\".format(data.get(\"url\"))},\n locs.get_art(data),\n locs.get_info(data),\n False)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_GENRE)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)",
"def filter_top_cv(self, k=10, csv_file=\"enhanced.csv\"):\n # get label\n all_cv = []\n label_file = pd.read_csv(csv_file)\n labels = label_file['label']\n for label in labels:\n all_cv += list(label.split(\";\"))\n\n # count label num to get top-frequent label\n count = Counter(all_cv)\n top_cv = [x[0] for x in count.most_common(k)]\n\n # make dict mapping dir to label\n d = {}\n genes = label_file['Gene']\n labels = label_file['label']\n length = len(genes)\n for i in range(length):\n d[genes[i]] = list(labels[i].split(\";\"))\n\n filter_d = {}\n all_sids = sorted(d.keys())\n for sid in all_sids:\n for label in d[sid]:\n if label not in top_cv:\n continue\n if sid not in filter_d:\n filter_d[sid] = []\n filter_d[sid].append(label)\n\n if len(top_cv) < k:\n print(\"Error: top cv less than k\", count)\n return filter_d, top_cv",
"def apply_fm_evaluation(self, tag_bundle_set):\n for num, category in enumerate(self.categories):\n (is_matching_category, op_result_meta_info) = category.apply(tag_bundle_set)\n if is_matching_category:\n return (num, category.name, op_result_meta_info) if self.debug else (num, category.name)\n return (-1, None, []) if self.debug else (-1, None)",
"def all_nutrition(request):\n\n foods = Nutrition.objects.all()\n all_categories = Category.objects.all()\n query = None\n categories = None\n\n if request.GET:\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n foods = foods.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"Oops, looks like you didn't search for anything!\")\n return redirect(reverse('nutrition'))\n\n queries = Q(Food__icontains=query) | Q(category__friendly_name__icontains=query)\n foods = foods.filter(queries)\n\n context = {\n 'foods': foods,\n 'search_term': query,\n 'current_categories': categories,\n 'categories': all_categories,\n }\n\n return render(request, 'nutrition/nutrition.html', context)",
"def use_random_keyword_classifier():\n while True:\n utterance = input(\"\\n\\nEnter utterance you want to classify, \\ntype menu or exit to go back:\\n-> \").lower()\n if utterance == \"menu\" or utterance == \"exit\":\n break\n else:\n try:\n label_pred = random_keyword_classifier(utterance)\n print(\"Prediction: {0}\".format(*label_pred))\n except ValueError:\n print(\"Prediction: {0}\".format(\"null\"))",
"def _load_suggestion(self):\n curItem = self.tree.focus()\n parent = self.tree.parent(curItem)\n\n categories = ['approved', 'conflicts', 'suggestions', 'unknown', \\\n 'cldr',]\n if parent is '':\n #skip it\n pass\n else:\n if parent not in categories:\n curTerm = parent\n category = self.tree.parent(parent)\n else:\n curTerm = curItem\n category = parent\n if CurItem != CurTerm:\n self.preferred.set(self.tree.item(curItem)['values'][1])",
"def recommendCategory(self, page, randomWeb, centroids = None, nSugg = None, printRes = True):\n if(centroids is None):\n try:\n centroids = self.readFile(\"centroids.pickle\")\n except:\n centroids = self.getAllCentroids()\n \n if(randomWeb):\n try:\n wikipedia.page(page)\n except Exception as e:\n print(\"We did not find the page on wikipedia, write it without '_'\")\n return\n\n if(printRes):\n print(\"\\nI'm categorizing the '%s' page..\" % page)\n \n pageVector = self.getVector(page)\n \n res = {cat:Categorization.cosin_sim_pairs(pageVector, centre) for cat, centre in centroids.items()}\n\n if(randomWeb):\n actual = [ParseDumpWiki.normName(c) for c in wikipedia.page(page).categories]\n else:\n actual = self.db.getCategoriesGivenPage(page)\n\n if(nSugg is None):\n if(len(res) < len(actual)):\n nSugg = len(res)\n else:\n nSugg = len(actual)\n\n top = sorted(res.items(), key = lambda kv: kv[1], reverse=True)[:nSugg]\n m1, m2, m3 = self.measures(actual, top, nSugg)\n if(printRes):\n Categorization.printStats(m2, m3, actual, top)\n return m1, m2, m3",
"def category_choice_input(self):\n self.category = input(fr.FR[8])\n try:\n if self.category == \"q\":\n self.leave_category_choice -= 1\n elif 1 <= int(self.category) <= len(config.CATEGORIES):\n print(self.category)\n self.products = self.product_table.get_list_product(\n self.category)\n self.products_sub = self.product_table.get_list_product(\n self.category)\n self.choice_product()\n self.leave_category_choice -= 1\n except ValueError:\n print(fr.FR[10])",
"def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)",
"def top_k_result(self, score, k=1):\n self.candidates = []\n self.candidate_sub = []\n self.top_k = heapq.nlargest(n=k, iterable=score)\n\n # Iterate the Top k scores, print each score and standard term\n for top_i in self.top_k:\n max_index = score.index(top_i)\n\n # Find the synonym term (might include standard terms)\n match_standard = self.synonym_term[max_index]\n\n # Find the standard term\n p_index = self.standard_synonym.tolist().index(match_standard)\n synonym_term = self.knowledge[p_index, 1]\n standard_term = self.knowledge[p_index, 0]\n\n # These lines of codes are mainly for Sub-words frequency\n temp_pre_name = remove_punctuation(term=synonym_term)\n temp_pre_name, _ = find_English_term(term=temp_pre_name)\n synonym_term_sub = self.subword_embed_calss.get_subword(term=temp_pre_name, is_print=False)\n synonym_term_sub = ' '.join(synonym_term_sub)\n self.candidates.append(standard_term)\n self.candidate_sub.append(synonym_term_sub)\n\n print('Top 10 Mapping ::: ', self.input_str, '----->', synonym_term, '----->', standard_term, ' (Similarity: ', top_i, ')')\n return standard_term",
"def print_categories():\n u_item_dict = {}\n with open('service_now_ticket_sample.csv') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if row['u_category_gear'] not in u_item_dict:\n u_item_dict[row['u_category_gear']] = 1\n elif row[\"U_Category Match Found in GEAR?\"] == \"Yes\":\n u_item_dict[row['u_category_gear']] = u_item_dict[row['u_category_gear']] + 1\n print(sorted(u_item_dict.items(), key=operator.itemgetter(1)))",
"def search(self, query):\n top_texts, top_ids, top_scores = self.retrieve_topn(query)\n return self.similarity.re_rank(query, top_texts, top_ids)",
"def top_5_similar_2(list_string, my_nlp=nlp1, model_type=my_model, doc_topic=my_doc_topic):\n vec = my_nlp.transform(list_string)\n vtrans = model_type.transform(vec)\n array_5 = pairwise_distances(vtrans, doc_topic, metric='cosine').argsort()[0][0:5]\n # result_df = df_reviews[['game_link']].iloc[array_5]\n return df_reviews[['game']].iloc[array_5]\n # return(\"test\")\n return result_df",
"def filter_inference_results(self, predictions, object_category='person'):\n if predictions is not None and len(predictions) == 3:\n bboxes, labels, confs = predictions\n\n # Only return bounding boxes for the selected object category.\n category_bboxes = [(bbox, \n label, \n conf) for (bbox, \n label, \n conf) in zip(bboxes, \n labels, \n confs) if (label == object_category).any()]\n\n if len(category_bboxes) > 0:\n # Choose biggest object of selected category.\n biggest_bbox = None\n biggest_label = None\n biggest_conf = None\n most_pixels = 0\n\n for (bbox, label, conf) in category_bboxes:\n (x, y, w, h) = bbox\n n_pixels = w * h\n\n if n_pixels > most_pixels:\n most_pixels = n_pixels\n biggest_bbox = bbox\n biggest_label = label\n biggest_conf = conf\n\n category_bboxes = ([biggest_bbox], [biggest_label], [biggest_conf])\n\n predictions = category_bboxes\n\n return predictions",
"def fetchSuggestion(self, keyword, seed_keyword, meta_keyword):\n # user agent is an HTTP browser request header that gives servers information regarding the client device and/or operating system on which the browser is running\n user_agent_list = [\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',\n ]\n url = \"http://suggestqueries.google.com/complete/search?client=chrome&hl={}&gl={}&callback=?&q={}\".format(\n self.language, self.country, keyword)\n user_agent = random.choice(user_agent_list)\n headers = {\"user-agent\": user_agent, \"dataType\": \"jsonp\"}\n response = requests.get(url, headers=headers, verify=True)\n if response.status_code == 200:\n suggestions = json.loads(response.text)\n sugg = []\n index = 0\n relevancies = []\n suggesttypes = []\n suggestsubtypes = []\n verbatimrelevance = \"\"\n if \"google:suggestrelevance\" in suggestions[4].keys():\n relevancies = suggestions[4]['google:suggestrelevance']\n if \"google:suggesttype\" in suggestions[4].keys():\n suggesttypes = suggestions[4]['google:suggesttype']\n if \"google:verbatimrelevance\" in suggestions[4].keys():\n verbatimrelevance = suggestions[4]['google:verbatimrelevance']\n if \"google:suggestsubtypes\" in suggestions[4].keys():\n suggestsubtypes = suggestions[4]['google:suggestsubtypes']\n for word in suggestions[1]:\n if self.checkSeedKeywordExists(word, meta_keyword):\n sugg.append({\n 'keyword': word,\n 'relevancy_score': relevancies[index] if len(relevancies) > 0 else None,\n 'suggesttype':suggesttypes[index] if len(suggesttypes) > 0 else None,\n 'verbatimrelevance' : verbatimrelevance,\n 'seed_keyword': seed_keyword,\n 'meta_keyword': meta_keyword,\n 'suggestsubtype' : suggestsubtypes[index] if len(suggestsubtypes) > 0 else None,\n })\n else:\n continue\n index += 1\n return sugg\n # returning false when google blocks an ip for some time \n return False",
"def search_categories(self):\n with Transaction().start(DBNAME, 1):\n categorieslist = self.Category.search(['parent', '=', 'Ingredients'])\n return tuple(i.name for i in categorieslist)",
"def get_output(interpreter, top_k, score_threshold):\n scores = cvtf.output_tensor(interpreter, 0)\n categories = [\n Category(i, scores[i])\n for i in np.argpartition(scores, -top_k)[-top_k:]\n if scores[i] >= score_threshold\n ]\n return sorted(categories, key=operator.itemgetter(1), reverse=True)",
"def get_interactive_match(self, choices, query):\n if query in self.SKIP_KEYWORDS:\n return None\n results = process.extract(query, choices, limit=10) # fuzzy string matching\n best_match = results[0]\n second_best_match = results[1]\n if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score\n self.print(\"Couldn't find a conclusive match for '%s'. Best matches:\" % (query))\n i = 0\n for result in results:\n i += 1\n print(\" [%i] %s\" % (i, result[0]))\n answer = input(\"Choose one or specify a less ambiguous query: \")\n self.clear_lines(2 + len(results))\n if answer.isdigit() and int(answer) <= len(results):\n return results[int(answer) - 1][0]\n else:\n return self.get_interactive_match(choices, answer)\n else:\n return best_match[0]",
"def wordByCategoryName(self, category):\n\t\ttry:\n\t\t\tl = self.dictData[category]\n\t\t\tindex = random.randint(1, len(l)) -1\n\t\t\treturn (l[index])\n\t\texcept KeyError:\n\t\t\tprint (\"La categoría ingresada no existe.\")",
"def select_best(self,dataframe: pd.DataFrame):\n \n # create a Dataframe only for categorical variables\n # categorical_df = pd.get_dummies(dataframe[self.cat_feats])\n categorical_df = dataframe[self.cat_feats]\n \n for feats in self.cat_feats:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(dataframe[feats].values)\n categorical_df.loc[:,feats] = lbl.transform(dataframe[feats].values)\n \n # select only Top 5 variables \n selector = SelectKBest(chi2,k=5)\n # give the targetcolumn and the rest of the data to the scalar to fit\n selector.fit(categorical_df,dataframe[self.target_cols])\n # get the indicies of the selected columns\n cols = selector.get_support(indices=True)\n\n # For display purpose Only\n dfscores = pd.DataFrame(selector.scores_)\n dfcolumns = pd.DataFrame(categorical_df.columns)\n\n #concat two dataframes for better visualization \n featureScores = pd.concat([dfcolumns,dfscores],axis=1)\n featureScores.columns = ['Features','Score'] #naming the dataframe columns\n featureScores = featureScores.sort_values(by='Score', ascending=False)\n \n utils.bar_plot(\n x_data= featureScores['Features'],\n y_data=featureScores['Score'],\n title=\"Select_K_Best using CHI2 For Categorical Features\",\n x_title=\"Features\",\n y_title=\"CHI2 Score\",\n output_path= os.path.join(self.output_path,\"select_k_best_chi2.html\")\n )\n \n self.cat_feats = featureScores['Features'].values.tolist()[:self.num_best]\n # drop the columns which did not qualify\n for feats in self.dataframe_d_copy.columns:\n if feats not in self.cat_feats:\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feats,axis=1)\n return self.cat_feats",
"def definitionsearch(category, search_me, userid):\n user_id = userid\n search_me = search_me.strip()\n # get wikipedia\n try:\n # see if it is saved in db\n search_term = db_search_terms.find_one({\"value\": search_me.lower()})\n if search_term and search_term.get(\"category\") == category:\n search_id = search_term.get(\"_id\")\n wiki = db_wikipedia.find_one({\"search_id\": search_id})\n if wiki:\n wiki_def = wiki.get('wiki_summary')\n # else use wikipedia API\n else:\n wiki_def = wisdomaiengine.factualsearch(category, search_me.lower())\n except:\n wiki_def = \"Oops\"\n # check if search_term has been run before\n results = db_search_terms.find_one({\"value\": search_me.lower()})\n if results and results.get(\"category\") == category:\n search_id = results.get('_id')\n else:\n data = {\"value\": search_me.lower(), \"category\": category}\n search_id = db_search_terms.insert(data, check_keys=False)\n # write data to searches collection\n data = {\"search_id\": search_id,\n \"category\": category,\n \"user\": userid,\n \"datetime\": datetime.utcnow()}\n x = db_searches.insert(data, check_keys=False)\n # save data to wikipedia collection\n wiki = db_wikipedia.find_one({\"search_id\": search_id})\n if wiki:\n if wiki.get(\"wiki_summary\") != wiki_def:\n data = {\"search_id\": search_id,\n \"wiki_summary\": wiki_def,\n \"datetime\": datetime.utcnow()}\n db_wikipedia.update({\"search_id\": search_id}, {\"$set\": data})\n else:\n data = {\"search_id\": search_id,\n \"wiki_summary\": wiki_def, \n \"datetime\": datetime.utcnow()}\n x = db_wikipedia.insert(data, check_keys=False)\n # return json\n jsonob = jsonify(search=search_me,\n factual=wiki_def)\n return jsonob",
"def the_search_function(company_name, top_count=5):\n main_company = company_data[company_data.name == company_name].iloc[0]\n\n if top_count == 1:\n search_str = \"\\nSearching for the closest company to %s...\\n\" \\\n % (main_company[\"name\"])\n\n else:\n search_str = \"\\nSearching for top %g closest companies to %s...\\n\" \\\n % (top_count, main_company[\"name\"])\n\n print search_str\n\n matching_companies = match_keywords_descriptions(main_company) \n search_results = search_descriptions(matching_companies, main_company, \n top_count)\n\n if search_results:\n print \"Results:\"\n\n for result in search_results:\n print \"\\t\" + result\n\n else:\n print \"No results available\"",
"def solution(model, top_p, top_idx, category_names):\n top_p = top_p[0].cpu().detach().numpy()\n\n top_cats_names = []\n for idx in top_idx[0].cpu().detach().numpy():\n cat = model.idx_to_class[str(idx)]\n name = cat_to_name(cat, category_names)\n top_cats_names.append(name)\n\n return top_p, top_cats_names",
"def ask_user_for_relevance(query_results):\n for i, result in enumerate(query_results):\n hdr = 'Result #%d ' % (i+1)\n prompt_text = 'Is result #%d relevant? [y/n] ' % (i+1)\n print '\\n' + hdr + '-'*(70 - len(hdr))\n print result.to_formatted_string()\n print '-'*70\n while True:\n user_in = raw_input(prompt_text).strip().lower()\n if user_in == 'y' or user_in == 'n':\n break\n if user_in == 'y':\n result.is_relevant = True",
"def closest_match(desired_language: {str, Language}, supported_languages: list,\n max_distance: int=25) -> (str, int):\n # Quickly return if the desired language is directly supported\n if desired_language in supported_languages:\n return desired_language, 0\n\n # Reduce the desired language to a standard form that could also match\n desired_language = standardize_tag(desired_language)\n if desired_language in supported_languages:\n return desired_language, 0\n\n match_distances = [\n (supported, tag_distance(desired_language, supported))\n for supported in supported_languages\n ]\n match_distances = [\n (supported, distance) for (supported, distance) in match_distances\n if distance <= max_distance\n ] + [('und', 1000)]\n\n match_distances.sort(key=itemgetter(1))\n return match_distances[0]",
"def __choose_best_matching_candidate(candidates, artist):\n\n artist_names = set()\n for match in candidates:\n artist_names.add(match[1])\n\n # If there is more than 1 matched artist:\n if len(artist_names) > 1:\n \n best_distance = 10000\n best_artist = \"\"\n\n # Calculate the levenshtein edit distance between the searched artist name and the artist names in the search results.\n for matched_artist in artist_names:\n distance = editdistance.eval(matched_artist, artist)\n if distance < best_distance:\n best_distance = distance\n best_artist = matched_artist\n\n # Then exclude from candidates all matches that are NOT from the best artist\n candidates = [candidate for candidate in candidates if candidate[1] == best_artist]\n else:\n best_artist = artist_names.pop()\n best_distance = editdistance.eval(best_artist, artist)\n\n # Threshold candidate name to the artist name\n ratio = best_distance/len(artist)\n # Allow ~15% difference\n if ratio > 0.15:\n raise MatchNotFoundError(\"Closest artist is too far of the queried artist\")\n\n # Descending list\n sort_on_num_ratings = sorted(candidates, key=lambda cand: cand[2], reverse=True)\n\n # Take the one with the most votes\n selected = sort_on_num_ratings[0]\n\n # Unless it has a rating lower than 4.\n if selected[3] < 4:\n\n sort_on_rating = sorted(candidates, key=lambda cand: cand[3], reverse=True)\n\n # If there is one with a rating higher than 4, select that one. \n if sort_on_rating[0][3] > 4:\n selected = sort_on_rating[0]\n\n return selected",
"def search_videos(self, search_term):\n recommendations = []\n for video in self.videos_dict:\n if not video.flagged and search_term in self.videos_dict[video]:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n\n if n == 0:\n print(f\"No search results for {search_term}\")\n else:\n print(f\"Here are the results for {search_term}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"def scan_category(category_name=\"\"):\n import traceback\n from categoryscanner import update_category_size\n \n #check if we have been disabled and if so just abort\n if not getattr(settings,\"GNMPLUTOSTATS_CATEGORY_SCAN_ENABLED\",True):\n logger.warning(\"Scan category {0}: Category scanning has been disabled, exiting\".format(category_name))\n return\n\n try:\n logger.info(\"Starting scan of category {0}\".format(category_name))\n result = update_category_size(category_name)\n logger.info(\"Completed scan of category {0}\".format(category_name))\n result['attached'].save(category_name=category_name, is_attached=True)\n result['unattached'].save(category_name=category_name, is_attached=False)\n\n except Exception as e:\n logger.error(traceback.format_exc())\n raise #re-raise to see error in Celery Flower",
"def suggested_search(search_text):\n threshold = 0.6\n global model\n\n search_text = remove_stop_words(search_text)\n tmp_search = search_text.split()\n\n new_search = []\n for word in tmp_search:\n similar_words = get_similar_words(model, word)\n new_search = select_top_words(similar_words, new_search, threshold)\n\n new_search = list(set(new_search))\n new_search = ' '.join(new_search)\n\n return new_search + ' ' + search_text",
"def find_substitute(self):\n\n products_list = None\n\n while not products_list:\n self.get_targeted_category()\n\n db.connect()\n db.execute(\"\"\"\n SELECT product_id, nutriscore_id\n FROM Product_per_category\n INNER JOIN Product\n ON Product.id = product_id\n WHERE category_id = %s AND nutriscore_id < %s\n ORDER BY nutriscore_id\n \"\"\", (self.category_id, self.nutriscore,))\n products_list = db.fetch(True)\n db.disconnect()\n self.category_concordance += 1\n\n return products_list[0][0]",
"def category(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"category\")",
"def ranked_sense_summary(**kwargs):\r\n level = kwargs.get('level', 3)\r\n lemma = kwargs.get('lemma')\r\n wordclass = kwargs.get('wordclass')\r\n omit_null = kwargs.get('omit_null', False)\r\n\r\n if lemma is not None:\r\n main_sense = main_sense_finder.main_sense(lemma=lemma,\r\n wordclass=wordclass,\r\n listed_only=True)\r\n if main_sense is not None:\r\n kwargs['promote'] = main_sense.refid\r\n\r\n candidates = tdb.ranked_search(**kwargs)\r\n if omit_null:\r\n candidates = [c for c in candidates if c.thesclass is not None]\r\n\r\n # Give each thesclass a probability value (as a ratio of the sum\r\n # of all senses' ratings)\r\n total = sum([c.rating() for c in candidates])\r\n if total <= 0:\r\n total = 1\r\n\r\n summary = {}\r\n for c in candidates:\r\n if c.thesclass is None or c.thesclass.ancestor(level=level) is None:\r\n ancestor = None\r\n identifier = 0\r\n else:\r\n ancestor = c.thesclass.ancestor(level=level)\r\n identifier = ancestor.id\r\n if not identifier in summary:\r\n summary[identifier] = ResultRow(ancestor)\r\n summary[identifier].append(c)\r\n\r\n # Convert to a list\r\n summary = list(summary.values())\r\n # Add a probability score (0 < p < 1) to each row\r\n [row.set_probability(total) for row in summary]\r\n # Sort by probability\r\n summary.sort(key=lambda r: r.probability, reverse=True)\r\n\r\n return summary",
"def recall_at_k(model, ratings, k, relevance_func):\n predictions = model.predict_all()\n np.place(predictions, ratings == 0, -np.Inf)\n recommended = predictions.argsort(1)[::, :-k-1:-1]\n \n relevance_per_user = np.take_along_axis(ratings, recommended, 1)\n relevance_per_user = relevance_func(relevance_per_user).sum(1)\n \n max_relevance_per_user = relevance_func(ratings)\n max_relevance_per_user = max_relevance_per_user.sum(1).clip(0, k)\n \n recall = np.divide(relevance_per_user, max_relevance_per_user)\n \n return np.nanmean(recall)",
"def filter_category(category_list: List, category: str):\n for cat in category_list:\n if cat[\"name\"] == category:\n return cat\n return category_list.pop()",
"def get_targeted_category(self):\n\n db.execute(\"\"\"\n SELECT Category.id,\n (\n SELECT COUNT(Product_per_category.product_id)\n FROM Category AS category_duplicate\n INNER JOIN Product_per_category\n ON Product_per_category.category_id = category_duplicate.id\n WHERE Category.id = category_duplicate.id\n ) AS products_count\n FROM Product\n INNER JOIN Product_per_category\n ON Product.id = Product_per_category.product_id\n INNER JOIN Category\n ON Category.id = Product_per_category.category_id\n WHERE Product.id = %s\n ORDER BY products_count\n \"\"\", (self.id,))\n try:\n self.category_id = db.fetch(True)[self.category_concordance][0]\n except IndexError:\n return",
"def classify(listOfTopics):\n result = []\n\n def catForTop(regexes, topic):\n for pattern in regexes:\n if re.match(pattern, topic):\n return True\n return False\n\n for category, regexes in categ.iteritems():\n for topic in listOfTopics:\n if catForTop(regexes, topic):\n result.append(category)\n return result",
"def search_category(self):\n return _(self.child_class)",
"def __ranking_function(self, doc, query_tokens):",
"def goto_category_by_title(self,category):\n\n return self.catbrowser.goto_category_by_title(category)",
"def relevance(self,\n input_nchw: torch.Tensor,\n label_idx_n: Optional[torch.Tensor] = None) -> torch.Tensor:\n pf.sanity_checks.ensure_nchw_format(input_nchw)\n pf.sanity_checks.verify_square_input(input_nchw)\n\n self.input_nchw = input_nchw.to(device=DEVICE)\n\n # Prepare to compute input gradient\n # Reset gradient\n self.model.zero_grad()\n input_nchw.requires_grad = True\n\n # ZBoxrule-specific parameters\n low: Union[torch.Tensor, int]\n high: Union[torch.Tensor, int]\n c2_low_grad: Union[torch.Tensor, int]\n c3_high_grad: Union[torch.Tensor, int]\n\n # Set default values\n low = high = c2_low_grad = c3_high_grad = 0\n\n # Vars to retrieve gradients from first layer\n first_layer: torch.nn.Module = self.model.features[0]\n\n if isinstance(first_layer, rules.LrpZBoxRule):\n # Access high and low copy layers in first layer.\n low = first_layer.low.to(device=DEVICE)\n high = first_layer.high.to(device=DEVICE)\n\n # Reset stored gradients.\n low.grad = None\n high.grad = None\n\n # Reset stored gradients\n input_nchw.grad = None\n\n # Compute explanation by storing value of gradient in input_nchw.grad.\n # Only the predicted class is propagated backwards.\n #\n # 1. Compute forward pass\n forward_pass: torch.Tensor = self.model(input_nchw).to(device=DEVICE)\n\n # 2. Get index of classes to be explained\n idx: torch.Tensor\n if label_idx_n is not None:\n # Compute classes passed as argument explicitly\n idx = label_idx_n.to(device=DEVICE)\n\n # Save index of classes to be explained as instance variable\n self.label_idx_n = label_idx_n\n else:\n # Get index maximum activation in the output layer (index of the predicted class)\n idx = forward_pass.max(dim=1).indices.to(device=DEVICE)\n\n # 3. Create new tensor where elements are tuples (i, idx[i]) with i: counter.\n # Tensor i looks like this: [0, 1, ..., len(idx)]\n i: torch.Tensor = torch.arange(end=len(idx),\n device=DEVICE)\n\n # Stacked tensor looks like this: [(i, idx[i]), (i+1, idx[i+1]), ...],\n # where i is the counter and idx[i] is the index of\n # the maximum activation in the output layer.\n\n # Indices of selected classes are particularly useful for Pixel-Flipping algorithm.\n self.explained_class_indices = torch.stack((i, idx), dim=1)\n\n # 4. One-hot encoding for the predicted class in each sample.\n # This is a mask where the predicted class is True and the rest is False.\n batch_size: int = pf.utils.get_batch_size(input_nchw=input_nchw)\n number_of_classes: int = forward_pass.shape[1]\n # Init zeros tensor for one-hot encoding\n gradient: torch.Tensor = torch.zeros(batch_size,\n number_of_classes,\n dtype=torch.bool,\n device=DEVICE)\n # Set the predicted class to True\n #\n # The following statement should be equivalent to:\n # gradient[*self.explained_class_indices.T] = True\n gradient[self.explained_class_indices[:, 0],\n self.explained_class_indices[:, 1]] = True\n\n # 5. Compute gradient of output layer for the predicted class of each sample.\n forward_pass.backward(gradient=gradient)\n\n if isinstance(first_layer, rules.LrpZBoxRule):\n # Compute gradients\n c2_low_grad = low.grad.to(device=DEVICE)\n c3_high_grad = high.grad.to(device=DEVICE)\n\n # Compute input gradient\n c1_input_grad = input_nchw.grad.to(device=DEVICE)\n\n # Compute relevance\n self.relevance_scores_nchw = (input_nchw * c1_input_grad +\n low * c2_low_grad +\n high * c3_high_grad)\n return self.relevance_scores_nchw.detach().to(device=DEVICE)",
"def predict_category(img_path):\n tensor = path_to_tensor(img_path)\n # WE need to send a tensor to find the bottelneck feature so cnverting the image to a tensor\n\n\n prediction = model_final.predict(tensor)\n\n return np.argmax(prediction)",
"def tfidf_category(self, min_freq:float=0.001, max_freq:float=0.75, ngram:int=1, output:str='graph'):\n \n if not(isinstance(min_freq, float) and min_freq < 1 and min_freq > 0):\n raise ValueError(\"Min_freq must be a float between 0 and 1\")\n if not(isinstance(max_freq, float) and max_freq < 1 and max_freq > 0):\n raise ValueError(\"max_freq must be a float between 0 and 1\")\n if not(isinstance(ngram, int) and ngram >= 1):\n raise ValueError(\"ngram must be an integer greater or equal than 1.\")\n if not(isinstance(output, str) and output in ['graph','table']):\n raise ValueError(\"Select your output type: table or graph?\")\n\n categories = self.table.category.unique()\n print(\"Select an input among\", categories)\n k = input()\n if k not in categories:\n raise ValueError(\"Input must be a single string among the categories\")\n\n document = self.table[self.table.category==k].clean_text.tolist()\n\n tfidf_vectorizer = TfidfVectorizer(ngram_range = (ngram, ngram), \n analyzer='word', \n min_df=min_freq, \n max_df=max_freq, \n stop_words='english', \n sublinear_tf=True)\n\n X = tfidf_vectorizer.fit_transform(document)\n vectorizer = CountVectorizer(ngram_range=(ngram, ngram),\n analyzer = \"word\",\n min_df = min_freq,\n max_df = max_freq,\n stop_words = \"english\")\n X2 = vectorizer.fit_transform(document)\n word_counts = X2.toarray()\n word_tfidf = X.toarray()\n word_tfidf[word_tfidf < 0.2] = 0 # setting to 0 too low frequent words\n df = pd.DataFrame(data = {\"word\": vectorizer.get_feature_names(),\n \"tf\": word_counts.sum(axis = 0),\n \"idf\": tfidf_vectorizer.idf_,\n \"tfidf\": word_tfidf.sum(axis = 0)})\n df.sort_values([\"tfidf\", \"tf\", \"idf\"], ascending = False, inplace=True)\n\n if output=='graph':\n # showing the top 10 ngrams\n df=df.iloc[:10,]\n sns.set_context('poster') \n plt.subplots(figsize=(20,10))\n graph1 = sns.barplot(x=df['word'], y=df['tfidf'], palette=\"rocket\") \n graph1.set_xticklabels(labels = df['word'], rotation=30)\n graph1.set_ylabel(\"TFIDF\",fontsize=40)\n graph1.set_xlabel(\"\")\n graph1.set_title('Top ten {0}-grams in {1}'.format(ngram, \" \".join(k.split(\"_\")).capitalize()), fontsize=40)\n\n else:\n return df.reset_index().iloc[:,1:]",
"def k_nearest_point(activity_category, current_point, person_character=None):\n if person_character == config.ACTIVE:\n k = 10\n elif person_character == config.COMFORTABLE:\n k = 5\n elif not person_character:\n k = 5\n else:\n raise RuntimeError('Wrong Person Character Format')\n\n if activity_category == config.RECREATION:\n with open(config.LEISURE_FILE, 'r') as f:\n leisuredata = json.load(f)\n data_dict = leisuredata\n\n elif activity_category == config.COMMERCIAL:\n with open(config.SHOP_FILE, 'r') as f:\n shopdata = json.load(f)\n data_dict = shopdata\n\n elif activity_category == config.SELFSTUDY or \\\n activity_category == config.ATTEND_CLASS:\n with open(config.UNI_FILE, 'r') as f:\n unidata = json.load(f)\n data_dict = unidata\n\n elif activity_category == config.WORK:\n\n if not os.path.exists(config.COMPANY_FILE):\n basename = os.path.basename(config.COMPANY_FILE)\n filename = os.path.splitext(basename)[0]\n get_company_population(filename)\n\n with open(config.COMPANY_FILE, 'r') as f:\n companydata = json.load(f)\n data_dict = companydata\n\n data = []\n populations = []\n # convert points to WebMercator\n for feature in data_dict['features']:\n if activity_category == config.WORK:\n populations.append(feature[\"properties\"][\"population\"])\n\n lon, lat = feature[\"geometry\"][\"coordinates\"]\n data.append([lon2x(lon), lat2y(lat)])\n\n tree = spatial.cKDTree(data)\n # convert query point to WebMercator\n query_point = lon2x(current_point.x), lat2y(current_point.y)\n dd, ii = tree.query(query_point, k=k)\n\n # ignore all zero distance point\n nonzero_list = [i for i, d in enumerate(dd) if d != 0]\n dd = [dd[i] for i in nonzero_list]\n ii = [ii[i] for i in nonzero_list]\n\n # working place probability based on population\n # other place probability based on 1/distance\n if activity_category == config.WORK:\n probabilties = [populations[i] for i in ii]\n else:\n probabilties = [1 / d for d in dd]\n\n sum_of_prob = sum(probabilties)\n normalized_probs = [p / sum_of_prob for p in probabilties]\n\n neighbors = []\n # recover points to WGS84\n for i in ii:\n x, y = data[i][:2]\n neighbors.append(Point(x2lon(x), y2lat(y)))\n\n return normalized_probs, neighbors",
"def BestSellersQuery(self, category, domain='US'):\n if domain not in dcodes:\n raise Exception('Invalid domain code')\n\n payload = {'key': self.accesskey,\n 'domain': dcodes.index(domain),\n 'category': category}\n\n r = requests.get('https://api.keepa.com/bestsellers/?', params=payload)\n response = r.json()\n\n if 'bestSellersList' in response:\n return response['bestSellersList']['asinList']\n else:\n log.info('Best sellers search results not yet available')",
"def category_reducer(category):\n if not \"--\" in category:\n if category in BAD_CATEGORIES:\n return \"Unknown\"\n return category\n\n main, sub = category.split(\"--\")\n\n main = main.strip()\n if main in [\"Science\"]:\n return sub.strip()\n else:\n return main",
"def browse_categories():\n print(\"***** Find Businesses by Categories *****\")\n while True:\n print()\n category = input(\n 'Please enter a type of business (category) or type \"back\" or \"quit\": ')\n print()\n if category == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if category == \"back\":\n return\n\n # create a regex pattern for business name\n pattern = r\".*\" + re.escape(category) + r\".*\"\n regx = re.compile(pattern, re.IGNORECASE)\n\n cursor = business_col.find({\"categories\": regx})\n\n business_objects = cursor.limit(10)\n \n if cursor.count() == 0:\n print(\"No businesses found with given category.\")\n continue\n for business_object in business_objects:\n print_business(business_object)",
"def search_videos(self, search_term):\n all_videos = self._video_library.get_all_videos()\n all_videos.sort(key=lambda x: x.title)\n matching_videos = []\n for video in all_videos:\n if search_term.lower() in video.title.lower():\n matching_videos.append(video)\n\n matching_videos.sort(key=lambda x: x.title)\n\n if len(matching_videos) == 0:\n print(f\"No search results for {search_term}\")\n return\n\n print(\"Here are the results for cat:\")\n for i, matching_video in enumerate(matching_videos):\n print(f\"{i + 1}) {str(matching_video)}\")\n\n print(\n \"Would you like to play any of the above? If yes, specify the number of the video.\\nIf your answer is not a valid number, we will assume it's a no.\")\n video_number = input()\n\n # print(video_number)\n\n try:\n int_video_number = int(video_number)\n if int_video_number > len(matching_videos) or int_video_number < 0:\n return\n else:\n self.play_video(matching_videos[int_video_number - 1].video_id)\n except ValueError:\n return",
"def categorize_distances(row):\n category_name = \"\"\n try:\n distance_value = row[\"dist\"]\n value = float(distance_value)\n if value == 0.000000:\n category_name = \"Match\"\n elif 0.000001 <= value <= 0.1:\n category_name = \"Possible Match\"\n elif value > 0.1:\n category_name = \"None Match\"\n except:\n category_name = \"Not Categorised,Invalid Distance Value\"\n return category_name",
"def get_cifar10_category_name(result_tuple):\n categories = {\n 0: \"airplane\",\n 1: \"automobile\",\n 2: \"bird\",\n 3: \"cat\",\n 4: \"deer\",\n 5: \"dog\",\n 6: \"frog\",\n 7: \"horse\",\n 8: \"ship\",\n 9: \"truck\"\n }\n\n if hasattr(result_tuple, \"__len__\"):\n i = np.argmax(result_tuple)\n else:\n i = result_tuple\n\n return categories.get(i, \"InvalidCategory\")",
"def __make_relevance(self, item, keywords, fltr, fuzzy=False):\n penalty = 1\n\n # Prepare attribute set\n values = []\n for prop in item.properties:\n values.append(prop.value)\n\n # Walk thru keywords\n if keywords:\n for keyword in keywords:\n\n # No exact match\n if not keyword in values:\n penalty *= 2\n\n # Penalty for not having an case insensitive match\n elif not keyword.lower() in [s.value.lower() for s in item.properties]:\n penalty *= 4\n\n # Penalty for not having the correct category\n elif fltr['category'] != \"all\" and fltr['category'].lower() != item['_type'].lower():\n penalty *= 2\n\n # Penalty for not having category in keywords\n if item._type in self.__search_aid['aliases']:\n if not set([t.lower() for t in self.__search_aid['aliases'][item._type]]).intersection(set([k.lower() for k in keywords])):\n penalty *= 6\n\n # Penalty for secondary\n if fltr['secondary'] == \"enabled\":\n penalty *= 10\n\n # Penalty for fuzzyness\n if fuzzy:\n penalty *= 10\n\n return penalty",
"def getCategory():",
"def search_descriptions(companies, main_company, top_count=5):\n stemmer = SnowballStemmer(\"english\")\n main_company_desc = strip_clean_stem_description(main_company.desc, stemmer)\n distances = {\"name\":[], \"distance\":[]}\n\n # If description is basically nothing\n if (\"undisclosed\" in main_company.desc) or (\"stealth\" in main_company.desc):\n # No keywords either? No search can be done\n if pd.isnull(main_company.keywords):\n return None\n\n # No description but had keywords? I can use that\n else:\n return list(companies.sort_values(by=\"match_fraction\", \n ascending=0)[:top_count][\"name\"])\n\n else:\n for ii in range(len(companies)):\n company = companies.iloc[ii]\n company_desc = strip_clean_stem_description(company.desc, stemmer)\n # Getting cosine distance between two descriptions\n dist = calculate_cosine_dist(main_company_desc, company_desc)\n distances[\"name\"].append(company[\"name\"])\n # Synonyms shouldn't be as important as straight keywords.\n weight = company[\"match_fraction\"] + 0.5 * company[\"syn_match_frac\"]\n distances[\"distance\"].append(dist * weight)\n\n # the higher the distance, the better the match\n distances = pd.DataFrame(distances).sort_values(by=\"distance\", \n ascending=0)\n if len(distances) > top_count:\n return list(distances[:top_count][\"name\"])\n\n else:\n return list(distances[\"name\"])",
"def _filter_to_most_specific(self, graph, classlist):\n candidates = {}\n for brickclass in classlist:\n sc_query = f\"SELECT ?subclass WHERE {{ ?subclass rdfs:subClassOf+ <{brickclass}> }}\"\n subclasses = set([x[0] for x in graph.query(sc_query)])\n # if there are NO subclasses of 'brickclass', then it is specific\n if len(subclasses) == 0:\n candidates[brickclass] = 0\n continue\n # 'subclasses' are the subclasses of 'brickclass'. If any of these appear in\n # 'classlist', then we know that 'brickclass' is not the most specific\n intersection = set(classlist).intersection(subclasses)\n if len(intersection) == 1 and brickclass in intersection:\n candidates[brickclass] = 1\n else:\n candidates[brickclass] = len(intersection)\n most_specific = None\n mincount = float(\"inf\")\n for specific, score in candidates.items():\n if score < mincount:\n most_specific = specific\n mincount = score\n return most_specific",
"def best_match(beer):\n # get a list of donuts that match sugar content for beer\n candidates = get_candidates(beer)\n span = tracer.current_span()\n span.set_tag('donuts.candidates', candidates)\n\n # send the remaining candidates to our taster and pick the best\n max_score = -1\n best_match = None\n\n for candidate in candidates:\n try:\n resp = requests.get(\n \"http://taster:5001/taste\",\n params={\"beer\": beer.name, \"donut\": candidate},\n timeout=2,\n )\n except requests.exceptions.Timeout:\n continue\n\n score = resp.json()[\"score\"]\n if score > max_score:\n max_score = score\n best_match = candidate\n\n return best_match",
"def _get_category(identifier: str) -> str:\n for category, keywords in categories.items():\n # Check for each keyword\n for k in keywords:\n # Check if lower-case keyword is substring of lower-case identifier\n if identifier.lower().find(k.lower()) != -1:\n return category\n # Default value if no category was found\n return 'other'",
"def search():\n query = request.form.get(\"query\")\n category = list(mongo.db.tips.find({\"$text\": {\"$search\": query}}))\n return render_template(\"tips.html\", category=category)",
"def score_catalog_match(catalog, observed):\n for catalog_word in catalog.get_items():\n for observed_word in observed:\n if catalog_word.lower() == observed_word.lower():\n catalog.increment_score(1)\n return catalog\n\n\n # score = catalog.initial_score # Chronological orderings favored over anit-chronological orderings\n # for catalog_item in catalog.items:\n # # Assume for now that the catalog item is one word to be matched. TODO fix.\n # for observed_item in observed:\n # if catalog_item.lower() == observed_item.lower():\n # score += 1\n # return (score, catalog)\n #score = sum(exact_match(observed_item, catalog_item) for observed_item in observed)\n\n #return score",
"def relevance_ranking(data, ranked_list, gamma=0.5, stop_prob=0.7):\n total_relevance = 0\n for query in ranked_list:\n exposure = 1.0\n for doc in query[1]:\n relevance = doc[0]\n\n total_relevance += exposure * relevance * stop_prob\n\n exposure *= gamma\n exposure *= (1 - stop_prob * relevance)\n return total_relevance / len(ranked_list)",
"def reviews(category=\"All\"):\n if category == \"All\" and not request.args.get('search'):\n flash(\"You didn't enter any search criteria!\", \"error\")\n return redirect(request.referrer or url_for('index'))\n\n # Converts the search perameters to a dictionary\n\n search_params = request.args.to_dict()\n\n # Adds the category for category searches to the search_perams dictionary\n\n if category != \"All\":\n search_params['categories'] = category\n\n # Gets the query dictionary\n\n query = search(search_params, category)\n\n # Sets the page title\n\n if category == \"All\":\n page_title = \"Reviews\"\n\n else:\n page_title = category\n\n \"\"\"\n Gets the products which match the search criteria from the database and\n sorts them. Sort method is from https://docs.mongodb.com/manual/reference/\n method/cursor.sort/index.html\n \"\"\"\n products = list(mongo.db.products.find(query).sort(\n sort_items(request.args.get(\"sort\"))))\n\n \"\"\"\n Gets the filters from the database. Code for returning selected fields from\n https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/\n \"\"\"\n if category == \"All\":\n filters = list(mongo.db.categories.find({}, {\"name\": 1, \"_id\": 0}))\n\n else:\n filters = mongo.db.categories.find_one(\n {\"name\": category}, {\"brands\": 1, \"prices\": 1, \"_id\": 0})\n\n # Gets the number of products in the products list\n\n total = len(products)\n\n \"\"\"\n Paginates the products. Code is from https://gist.github.com/mozillazg/\n 69fb40067ae6d80386e10e105e6803c9\n \"\"\"\n page, per_page, offset = get_page_args(\n page_parameter='page', per_page_parameter='per_page', per_page=6)\n pagination_products = paginate_items(products, offset, per_page)\n pagination = paginate(products, page, per_page)\n\n \"\"\"\n Generates pagination info. Code is adapted from\n https://pythonhosted.org/Flask-paginate/\n \"\"\"\n record_numbers = pagination.info[48:53]\n\n if category == \"All\":\n pagination_info = 'Displaying {} of {} reviews found for \"{}\"'.format(\n record_numbers, total, search_params['search'])\n else:\n pagination_info = 'Displaying {} of {} reviews'.format(\n record_numbers, total)\n\n return render_template(\n \"reviews.html\",\n page_title=page_title,\n filters=filters,\n selected_category=search_params.get('categories'),\n selected_brands=search_params.get('brands'),\n selected_price=search_params.get('price'),\n products=pagination_products,\n pagination_info=pagination_info,\n total=total,\n page=page,\n per_page=per_page,\n pagination=pagination\n )"
] |
[
"0.5779498",
"0.5776032",
"0.57445335",
"0.5656867",
"0.56513745",
"0.5642086",
"0.5516284",
"0.5513393",
"0.5420318",
"0.54119575",
"0.53935814",
"0.5359446",
"0.532522",
"0.5297809",
"0.5292416",
"0.5278605",
"0.52570313",
"0.5227728",
"0.52234787",
"0.52224135",
"0.52097696",
"0.5199643",
"0.517219",
"0.5162983",
"0.5162358",
"0.5160729",
"0.51602113",
"0.5118577",
"0.5099927",
"0.5082551",
"0.5078048",
"0.5074396",
"0.5057796",
"0.5054986",
"0.50471604",
"0.5045911",
"0.50390005",
"0.502726",
"0.5026286",
"0.5024803",
"0.5013487",
"0.5008691",
"0.49960083",
"0.49929354",
"0.49894884",
"0.4980638",
"0.49725154",
"0.49668586",
"0.49618554",
"0.49580875",
"0.4949739",
"0.49427602",
"0.49405998",
"0.49375072",
"0.49288327",
"0.49265146",
"0.49237856",
"0.49142432",
"0.48996866",
"0.48961782",
"0.4893021",
"0.48884562",
"0.48742238",
"0.4871932",
"0.48600864",
"0.48598695",
"0.48516667",
"0.48417008",
"0.48401105",
"0.48230362",
"0.4821498",
"0.48168272",
"0.48088294",
"0.4803472",
"0.47966036",
"0.47892752",
"0.4783497",
"0.478333",
"0.4782724",
"0.47764844",
"0.476869",
"0.47646305",
"0.47583708",
"0.4752903",
"0.47504008",
"0.47486553",
"0.47373915",
"0.47349018",
"0.47322285",
"0.47203064",
"0.47157794",
"0.47145936",
"0.47115606",
"0.4706203",
"0.47057518",
"0.47024912",
"0.4701255",
"0.4700297",
"0.46977234",
"0.46974576"
] |
0.66086555
|
0
|
UserBase - a model defined in OpenAPI
|
def __init__(self, email: str=None, is_bot: bool=None, avatar_url: str=None, avatar_version: int=None, full_name: str=None, is_admin: bool=None, is_owner: bool=None, is_billing_admin: bool=None, role: int=None, bot_type: int=None, user_id: int=None, bot_owner_id: int=None, is_active: bool=None, is_guest: bool=None, timezone: str=None, date_joined: str=None, delivery_email: str=None, profile_data: Dict[str, object]=None):
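        # openapi_types: declared Python type for each model attribute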
self.openapi_types = {
'email': str,
'is_bot': bool,
'avatar_url': str,
'avatar_version': int,
'full_name': str,
'is_admin': bool,
'is_owner': bool,
'is_billing_admin': bool,
'role': int,
'bot_type': int,
'user_id': int,
'bot_owner_id': int,
'is_active': bool,
'is_guest': bool,
'timezone': str,
'date_joined': str,
'delivery_email': str,
'profile_data': Dict[str, object]
}
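        # attribute_map: maps Python attribute names to the JSON keys used in the API payload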
self.attribute_map = {
'email': 'email',
'is_bot': 'is_bot',
'avatar_url': 'avatar_url',
'avatar_version': 'avatar_version',
'full_name': 'full_name',
'is_admin': 'is_admin',
'is_owner': 'is_owner',
'is_billing_admin': 'is_billing_admin',
'role': 'role',
'bot_type': 'bot_type',
'user_id': 'user_id',
'bot_owner_id': 'bot_owner_id',
'is_active': 'is_active',
'is_guest': 'is_guest',
'timezone': 'timezone',
'date_joined': 'date_joined',
'delivery_email': 'delivery_email',
'profile_data': 'profile_data'
}
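        # Backing fields for the generated property getters/setters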
self._email = email
self._is_bot = is_bot
self._avatar_url = avatar_url
self._avatar_version = avatar_version
self._full_name = full_name
self._is_admin = is_admin
self._is_owner = is_owner
self._is_billing_admin = is_billing_admin
self._role = role
self._bot_type = bot_type
self._user_id = user_id
self._bot_owner_id = bot_owner_id
self._is_active = is_active
self._is_guest = is_guest
self._timezone = timezone
self._date_joined = date_joined
self._delivery_email = delivery_email
self._profile_data = profile_data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_base() -> typing.Any:\n # pylint: disable=no-member\n return open_alchemy.models.Base # type: ignore",
"def user_model(username):\n return {\n 'username': username,\n }",
"def user_model(self): \n return self.auth.store.user_model",
"def convert_api_to_usr_model(self):\n\n usr_model = dict()\n self._copy_api_entry('ApplicationName', usr_model)\n self._copy_api_entry('DateUpdated', usr_model)\n if 'ResourceLifecycleConfig' in self.api_model:\n usr_model['Configurations'] = self.api_model['ResourceLifecycleConfig']\n else:\n usr_model['Configurations'] = DEFAULT_LIFECYCLE_CONFIG\n\n if 'ServiceRole' not in usr_model['Configurations']:\n try:\n role = get_role(DEFAULT_LIFECYCLE_SERVICE_ROLE)\n if u'Arn' in role:\n arn = role[u'Arn']\n else:\n arn = DEFAULT_ARN_STRING\n except (NotFoundError, ServiceError):\n arn = DEFAULT_ARN_STRING\n\n usr_model['Configurations']['ServiceRole'] = arn\n\n return usr_model",
"def user():\n user = UserModel()\n user.email = \"lemmy@imotorhead.com\"\n user.first_name = \"Ian\"\n user.last_name = \"Kilmister\"\n user.phone = \"800.333.7680\"\n return user",
"def user(self):\r\n return resource.User(self)",
"def __init__(__self__,\n resource_name: str,\n args: AppUserBaseSchemaPropertyArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def test_User_instance(self):\n obj = User()\n self.assertIsInstance(obj, User)\n self.assertIsInstance(obj, BaseModel)",
"def user(self):",
"def __init__(self, user: User):\n self.user = user",
"def test_inherit(self):\n self.assertTrue(issubclass(User, BaseModel))",
"def user(self):\r\n return resources.User(self)",
"def user(self):\r\n return users.User(self)",
"def proto_user(self):\n return baker.make(User)",
"def proto_user(self):\n return baker.make(User)",
"def proto_user(self):\n return baker.make(User)",
"def __init__(self, base_api: BaseApi):\n super().__init__(base_api, self.__class__.__name__)",
"def test_inheritance(self):\n self.assertTrue(issubclass(type(self.user_1), BaseModel))",
"def user(self):\n pass",
"def user(self) -> \"User\":\n return User(connection=self)",
"def me(self):\r\n return User(self)",
"def __init__(self):\r\n self.__type = ALL_USERS\r\n self.__user = None",
"def user() -> settings.AUTH_USER_MODEL:\n\n return UserFactory()",
"def test_api_user_get(self):\n pass",
"def user():\n return UserFactory",
"def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)",
"def user_cls(self):\n return self.get_entity_cls('user')",
"def factory():\n return BaseUserProfileQuery()",
"def create_user_model():\n\n user_id_url = root_url + \"/{}/{}\".format(\"user\", session['user_id'])\n\n user_info = requests.get(\n url=user_id_url,\n headers={ 'Authorization': api_key },\n ).json()\n\n if user_info['isStudent']:\n session['user_role'] = 'student'\n student.get_user_id()\n student.get_user_details()\n student.get_user_bids()\n student.get_user_competencies()\n student.get_user_qualifications()\n student.get_contract_number()\n student.get_user_contract()\n student.initialized = True\n elif user_info['isTutor']:\n session['user_role'] = 'tutor'\n tutor.get_user_id()\n tutor.get_user_details()\n tutor.get_user_bids()\n tutor.get_user_competencies()\n tutor.get_user_qualifications()\n tutor.get_user_contract()\n tutor.initialized = True\n else:\n raise Exception(\"user is not student and tutor. What is the user role?\")\n\n print(student, tutor)\n\n return student, tutor",
"def user_model(username, id=1, is_admin=False):\n user = {\n 'username': username,\n 'id': id,\n }\n if is_admin:\n # Some versions of the API do not return the is_admin property\n # for non-admin users (See #115).\n user['is_admin'] = True\n return user",
"def author(self) -> \"api.User\":\n raise NotImplementedError",
"def user(self, *args, **kwargs) -> User:\n return User(self.handle, *args, **kwargs)",
"def __init__(self, *args, **kwargs):\n super(UserModelUnitTest, self).__init__(*args, **kwargs)",
"def __init__(self): # noqa: E501\n self.openapi_types = {\n }\n\n self.attribute_map = {\n }",
"def __init__(self, *args, **kwargs):\n\n Model.__init__(self, *args, **kwargs)\n\n self.collection = 'users'\n self.mdb = app.config['MDB'][self.collection]\n\n self.data_model = {\n 'name': str,\n 'email': str,\n 'password': str,\n 'created_on': datetime,\n }\n\n self.required_attribs = [\n 'name',\n 'email',\n 'password'\n ]\n\n # try to set _id from email before we hand off to the base class\n # load() method.\n rec = self.mdb.find_one({'email': self.kwargs.get('email', None)})\n if rec is not None:\n self.kwargs['_id'] = rec['_id']\n\n self.load()",
"def __init__(self, connection, user):\n super().__init__(connection)\n self.user = user",
"def __init__(self, user=None):\n self.user = user",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n app_id: Optional[pulumi.Input[str]] = None,\n index: Optional[pulumi.Input[str]] = None,\n master: Optional[pulumi.Input[str]] = None,\n pattern: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[str]] = None,\n required: Optional[pulumi.Input[bool]] = None,\n title: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n user_type: Optional[pulumi.Input[str]] = None) -> 'AppUserBaseSchemaProperty':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AppUserBaseSchemaPropertyState.__new__(_AppUserBaseSchemaPropertyState)\n\n __props__.__dict__[\"app_id\"] = app_id\n __props__.__dict__[\"index\"] = index\n __props__.__dict__[\"master\"] = master\n __props__.__dict__[\"pattern\"] = pattern\n __props__.__dict__[\"permissions\"] = permissions\n __props__.__dict__[\"required\"] = required\n __props__.__dict__[\"title\"] = title\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"user_type\"] = user_type\n return AppUserBaseSchemaProperty(resource_name, opts=opts, __props__=__props__)",
"def __init__(self):\n self.swagger_types = {\n 'owner_id': 'str',\n 'created_at': 'datetime',\n 'identifier': 'str',\n 'identifier_type': 'str',\n 'default_language': 'str',\n 'optional_identifier': 'str',\n 'id': 'str',\n 'v': 'float',\n 'id': 'str',\n 'case_records': 'list[str]'\n }\n\n self.attribute_map = {\n 'owner_id': '_ownerId',\n 'created_at': '_createdAt',\n 'identifier': 'identifier',\n 'identifier_type': 'identifierType',\n 'default_language': 'defaultLanguage',\n 'optional_identifier': 'optionalIdentifier',\n 'id': '_id',\n 'v': '__v',\n 'case_records': 'caseRecords'\n }\n\n self._owner_id = None\n self._created_at = None\n self._identifier = None\n self._identifier_type = None\n self._default_language = None\n self._optional_identifier = None\n self._id = None\n self._v = None\n self._id = None\n self._case_records = None",
"def user(self, user_id):\r\n return User(self, user_id)",
"def user(self):\n\n return self.user_model",
"def modelClass(self):\n raise NotImplementedError",
"def get_model():\n return UNISAL",
"def __init__(self, user, key):\n super(User, self).__init__(user=user, key=key)\n self.Ratings = User_Ratings(user, key)\n \"\"\"\n Allows to retrieve, add and delete user ratings.\n \"\"\"",
"def test_model(self):\n self.assertEqual(USER_MODEL, CustomUser)",
"def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'uses_git': 'bool',\n 'git_remote_url': 'str',\n 'git_username': 'str',\n 'git_password': 'str',\n 'git_username_user_attribute': 'str',\n 'git_password_user_attribute': 'str',\n 'git_service_name': 'str',\n 'deploy_secret': 'str',\n 'unset_deploy_secret': 'bool',\n 'pull_request_mode': 'str',\n 'validation_required': 'bool',\n 'allow_warnings': 'bool',\n 'is_example': 'bool',\n 'can': 'dict(str, bool)'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'uses_git': 'uses_git',\n 'git_remote_url': 'git_remote_url',\n 'git_username': 'git_username',\n 'git_password': 'git_password',\n 'git_username_user_attribute': 'git_username_user_attribute',\n 'git_password_user_attribute': 'git_password_user_attribute',\n 'git_service_name': 'git_service_name',\n 'deploy_secret': 'deploy_secret',\n 'unset_deploy_secret': 'unset_deploy_secret',\n 'pull_request_mode': 'pull_request_mode',\n 'validation_required': 'validation_required',\n 'allow_warnings': 'allow_warnings',\n 'is_example': 'is_example',\n 'can': 'can'\n }\n\n self._id = None\n self._name = None\n self._uses_git = None\n self._git_remote_url = None\n self._git_username = None\n self._git_password = None\n self._git_username_user_attribute = None\n self._git_password_user_attribute = None\n self._git_service_name = None\n self._deploy_secret = None\n self._unset_deploy_secret = None\n self._pull_request_mode = None\n self._validation_required = None\n self._allow_warnings = None\n self._is_example = None\n self._can = None",
"def __init__(self, networkapi_url, user, password, user_ldap=None):\n super(\n RoteiroEquipamento,\n self).__init__(\n networkapi_url,\n user,\n password,\n user_ldap)",
"def sample_user(self):",
"def __init__(self, model: str, **kwargs):\n super().__init__(model=model)",
"def user():",
"def __init__(self, **kwargs):\n APIBaseModel.__init__(self, **kwargs)\n self.flavor = Flavor(**self.flavor)",
"def create_model(self, ApiId: str, Name: str, Schema: str, ContentType: str = None, Description: str = None) -> Dict:\n pass",
"def model(self) -> Type[Model]:",
"def __init__(self, api_token=None, base_url=None, mount_accessor=None, org_name=None, primary_email=None, production=None, username_format=None): # noqa: E501 # noqa: E501\n self._api_token = None\n self._base_url = None\n self._mount_accessor = None\n self._org_name = None\n self._primary_email = None\n self._production = None\n self._username_format = None\n self.discriminator = None\n if api_token is not None:\n self.api_token = api_token\n if base_url is not None:\n self.base_url = base_url\n if mount_accessor is not None:\n self.mount_accessor = mount_accessor\n if org_name is not None:\n self.org_name = org_name\n if primary_email is not None:\n self.primary_email = primary_email\n if production is not None:\n self.production = production\n if username_format is not None:\n self.username_format = username_format",
"def test_default_model_init(User):\n user = User(id=uuid.uuid4(), email=\"user@domain.com\")\n assert user.email == \"user@domain.com\"\n assert not hasattr(user, \"name\")",
"def register_orm_base(self, base):\n for model in utils.searchable_sqlalchemy_models(base):\n self.register_type(model.es_type_name, model.es_properties, model)",
"def get_model(model=gin.REQUIRED):\n return model",
"def user(self, user_id):\r\n return resources.User(self, user_id)",
"def __init__(self, unique_user_ids, unique_item_ids, **kwargs):\n super().__init__(**kwargs)\n self.user_model = EntityModel(unique_user_ids)\n self.item_model = EntityModel(unique_item_ids)\n self.logger = logging.getLogger()",
"def setUpClass(self):\n\n base_model = BaseModel()",
"def test_is_subclass(self):\n user = User()\n user_details = {\"user_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertIsInstance(student, BaseModel)\n self.assertTrue(hasattr(student, \"id\"))\n self.assertTrue(hasattr(student, \"created_at\"))\n self.assertTrue(hasattr(student, \"updated_at\"))\n self.assertTrue(hasattr(student, \"user_id\"))",
"def build_user(data: Dict[Any, Any]) -> User:\n return User(**data)",
"def __init__(self, app, user):\n\n self.app = app\n self.user = user",
"def user(self):\n return self.create_user",
"def me(self):\n return User(self, ResourcePath(\"me\", None))",
"def model(self):",
"def model(self):",
"def model(self):",
"def model(self):",
"def model(self):",
"def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n\n # Set default role for a regular new User\n self.role = Role.query.filter_by(default=True).first()",
"def __init__(self):\n self.conf = None\n self.section = None\n self._engine = None\n self._session = None\n self.base_model = declarative_base()",
"def from_dict(cls, dikt: dict) -> 'UserBase':\n return util.deserialize_model(dikt, cls)",
"def get(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user_schema.dump(user)",
"def get_one_user():",
"def __init__(self, url='', credentials=None,\n get_credentials=True, http=None, model=None,\n log_request=False, log_response=False,\n credentials_args=None, default_global_params=None,\n additional_http_headers=None, response_encoding=None):\n url = url or self.BASE_URL\n super(ServiceuserV1, self).__init__(\n url, credentials=credentials,\n get_credentials=get_credentials, http=http, model=model,\n log_request=log_request, log_response=log_response,\n credentials_args=credentials_args,\n default_global_params=default_global_params,\n additional_http_headers=additional_http_headers,\n response_encoding=response_encoding)\n self.projects_services = self.ProjectsServicesService(self)\n self.projects = self.ProjectsService(self)\n self.services = self.ServicesService(self)",
"def __init__(self, **kwargs):\n Base.__init__(self, **kwargs)\n CashFlowUser.__init__(self)\n self.name = None\n self._produces = []\n self._stores = []\n self._demands = []",
"def test_create_user_object():\n from .scripts.initializedb import create_user_object\n user_object = create_user_object(\"test\", \"test\", \"test\")\n assert isinstance(user_object, User)",
"def forwards(self, orm):\n \n location_type = orm['mooi.Location']\n if len(location_type.objects.all()) == 0:\n l = location_type(city=\"Lethbridge\", country=\"Canada\")\n l.save()\n else:\n l = location_type.objects.all()[0]\n \n \n user_type = orm['auth.User']\n user = user_type(username=self.suUsername, email=self.suEmail, password=self.suPassword)\n user.first_name = self.suFirstName\n user.last_name = self.suLastName\n user.is_superuser = True\n user.save()\n \n profile_type = orm['mooi.Profile']\n userProfile = profile_type(user=user, location=l, phone=self.suPhone)\n userProfile.location = l\n userProfile.phone = self.suPhone\n userProfile.save()",
"def test_api_user_put(self):\n pass",
"def get_user_model(self):\n try:\n return django_apps.get_model('clientArea.CustomUser')\n except ValueError:\n raise ImproperlyConfigured(\"AUTH_USER_MODEL must be of the form 'app_label.model_name'\")\n except LookupError:\n raise ImproperlyConfigured(\n \"AUTH_USER_MODEL refers to model '%s' that has not been installed\" % settings.AUTH_USER_MODEL\n )",
"def __init__(self, base, **kwargs):\n self.base = base",
"def user(request, user_id):\n raise NotImplementedError",
"def setUp(self):\n self.base1 = BaseModel()",
"def auth(self, user):",
"def __init__(self, api, coordinator, name, dev_id, model):\n super().__init__(api, coordinator, name, dev_id)\n\n self._model = model\n\n self._is_on = False\n\n self._unique_id = f\"{dev_id}-plug\"",
"def __init__(self, session):\n super(FlattrApi, self).__init__(session)\n self.things = ThingApi(session)\n self.users = UsersApi(session)\n self.authenticated = AuthenticatedApi(session)",
"def model() -> Model:\n return Model()",
"def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)",
"def add_metadata(self, environ, identity):\n \n \n if userid:\n log.debug(\"userid of %s\" % userid)\n try:\n from vc.model import user\n u = user.user(userid)\n u.become(u.role)\n except FunctionError, e:\n if \"model\" in identity:\n del identity['model']\n return None\n identity['model'] = u",
"def test_model(base, fake_session):\n\n # Make a dummy model\n\n # these fields should be ignored and should not appear in the model\n ignored = (\"field1\", \"field2\", \"field3\")\n\n # these fields are in the model, but should not get dumped to json\n loadonly = (\"field6\", \"field7\")\n\n @add_schema\n class MyModel(base):\n fields = dict(ignore=ignored, load_only=loadonly)\n\n # load the model from dummy data\n values = range(10)\n keys = [\"field{}\".format(x) for x in values]\n data = dict(zip(keys, values))\n m = MyModel.load_from(data, fake_session)\n\n return m, ignored, loadonly, data, MyModel",
"def model(self, key, model_type:T, default=undefined, description=None, **kwargs) -> T:\n return self._process(key, description=description, default=default, cast=cast_pydantic(model_type),type=model_type, **kwargs)",
"def setUp(self):\n user = Users.query.first()",
"def __init__(self):\n self.db = ALL_USERS",
"def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n\n if not __name__ == cls.__module__:\n # e.g.: cls.__module__ = mpcontribs.api.projects.views\n views_path = cls.__module__.split(\".\")\n doc_path = \".\".join(views_path[:-1] + [\"document\"])\n cls.tags = [views_path[-2]]\n doc_filepath = doc_path.replace(\".\", os.sep) + \".py\"\n if os.path.exists(doc_filepath):\n cls.doc_name = cls.tags[0].capitalize()\n Model = getattr(import_module(doc_path), cls.doc_name)\n cls.schema_name = cls.doc_name + \"Schema\"\n cls.Schema = type(\n cls.schema_name,\n (ModelSchema, object),\n {\n \"Meta\": type(\n \"Meta\",\n (object,),\n dict(model=Model, ordered=True, model_build_obj=False),\n )\n },\n )\n cls.definitions = {cls.schema_name: schema2jsonschema(cls.Schema)}\n cls.resource.schema = cls.Schema\n\n # write flask-mongorest swagger specs\n for method in cls.methods:\n spec = get_specs(cls, method, cls.tags[0])\n if spec:\n dir_path = os.path.join(DOC_DIR, cls.tags[0])\n file_path = os.path.join(dir_path, method.__name__ + \".yml\")\n if not os.path.exists(file_path):\n os.makedirs(dir_path, exist_ok=True)\n\n if is_gunicorn:\n with open(file_path, \"w\") as f:\n yaml.dump(spec, f)\n logger.debug(\n f\"{cls.tags[0]}.{method.__name__} written to {file_path}\"\n )",
"def __init__(self, user):\n\n if isinstance(user, dict):\n # Every user must have these values\n self.id = user['id']\n self.name = user['first_name']\n\n # These are optional\n self.username = user.get('username', None)\n self.last_name = user.get('last_name', None)\n\n elif isinstance(user, tuple):\n # If a tuple was given, it has to be a 4-tuple\n self.id = user[0]\n self.name = user[1]\n self.last_name = user[2]\n self.username = user[3]\n\n # Special case, our loved admin!\n self.is_admin = self.id == 10885151 # @Lonami",
"def test_save(self):\n\n base_class = BaseModel()",
"def swagger_definition(self, base_path=None, **kwargs):\n return Swagger(\n {\n \"info\": Info(\n {\n key: kwargs.get(key, self.DEFAULT_INFO.get(key))\n for key in Info.fields.keys()\n if key in kwargs or key in self.DEFAULT_INFO\n }\n ),\n \"paths\": self.paths,\n \"swagger\": \"2.0\",\n \"basePath\": base_path,\n }\n ).to_primitive()",
"def __init__(self, model: object):\n self.model = model",
"def __init__(self,\r\n username=None,\r\n first_name=None,\r\n last_name=None,\r\n application_id=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.username = username\r\n self.first_name = first_name\r\n self.last_name = last_name\r\n self.application_id = application_id\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties"
] |
[
"0.61172575",
"0.5993476",
"0.5881623",
"0.58649784",
"0.5860354",
"0.5810632",
"0.57947624",
"0.5793472",
"0.5785655",
"0.57642925",
"0.5711532",
"0.57008547",
"0.56957424",
"0.56417537",
"0.56417537",
"0.56417537",
"0.5575235",
"0.55717164",
"0.5571534",
"0.55618167",
"0.55610824",
"0.5560262",
"0.5557235",
"0.55497885",
"0.55472213",
"0.54520917",
"0.5435926",
"0.54233277",
"0.5394816",
"0.53930897",
"0.53800863",
"0.5374902",
"0.53647244",
"0.5345077",
"0.5323725",
"0.5312281",
"0.5295401",
"0.5289949",
"0.5283285",
"0.5267066",
"0.5266542",
"0.5264142",
"0.5263365",
"0.52611315",
"0.5258936",
"0.52566546",
"0.5248371",
"0.5229258",
"0.52288103",
"0.52211475",
"0.52166927",
"0.52127504",
"0.5212252",
"0.5207661",
"0.52064246",
"0.52041453",
"0.5197046",
"0.51847565",
"0.51774293",
"0.51760024",
"0.5169637",
"0.5163156",
"0.51626927",
"0.5157911",
"0.51572025",
"0.51570284",
"0.51570284",
"0.51570284",
"0.51570284",
"0.51570284",
"0.515323",
"0.51526725",
"0.5148064",
"0.51444966",
"0.5114454",
"0.5109742",
"0.51046616",
"0.510214",
"0.50939494",
"0.5091688",
"0.5079319",
"0.5077883",
"0.50775176",
"0.5075506",
"0.50747734",
"0.50698346",
"0.5067811",
"0.5059036",
"0.505735",
"0.50533205",
"0.5053152",
"0.50511706",
"0.50401485",
"0.5003719",
"0.49907157",
"0.49894488",
"0.4986553",
"0.49838477",
"0.49731943",
"0.4968282"
] |
0.5249836
|
46
|
Returns the dict as a model
|
def from_dict(cls, dikt: dict) -> 'UserBase':
return util.deserialize_model(dikt, cls)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def from_dict(cls, dikt) -> 'ModelClass':\n return util.deserialize_model(dikt, cls)",
"def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return util.deserialize_model(dikt, cls)",
"def instance_to_model(self):\n pass",
"def from_dict(cls, dikt):\n return deserialize_model(dikt, cls)",
"def from_dict(cls, dikt):\n return deserialize_model(dikt, cls)",
"def get_modelDict(self):\n return self.__modelDict",
"def from_dict(cls, dikt) -> 'ModelsArray':\n return deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'VultrExtra':\n return util.deserialize_model(dikt, cls)",
"def json_to_model(cls, data):\n m = cls.to_model(data)\n m.raw = data\n cls._unlock_unmarshalling(m)\n cls.set_additional_fields(m, data)\n return m",
"def from_dict(cls, dikt) -> 'TaskModel':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> \"Todo\":\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Data':\n return util.deserialize_model(dikt, cls)",
"def to_model(self, payload):\n return payload",
"def from_dict(cls, dikt) -> 'Garage':\n return util.deserialize_model(dikt, cls)",
"def to_obj(self):\n return dict()",
"def load_data_model() -> dict:\n data_model = json.loads(DATA_MODEL_JSON)\n data_model[\"_id\"] = \"id\"\n data_model[\"timestamp\"] = \"now\"\n return data_model",
"def to_object(cls, query_dict: Dict):\n pass",
"def __load_model(self, company_id: str) -> dict:\n model = self.__redis.retrieve(key=company_id, field=\"\")\n if not model:\n model = {company_id: {\"TF_W\": {},\n \"diff_pos\": [],\n \"diff_neg\": []}}\n return model",
"def from_dict(cls, dikt) -> 'SkillPropertyModel':\n return util.deserialize_model(dikt, cls)",
"def to_model(self, payload):\n if self.skip:\n raise SkipField\n\n model = self.get_or_initialize_model(payload)\n model = self.update_model_fields(model, payload)\n return model",
"def from_dict(cls, dikt) -> 'SearchResult':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Reviewer':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'ResultItemSet':\n return util.deserialize_model(dikt, cls)",
"def to_model(cls, obj):\n\n new_model = cls()\n\n for key, value in obj.iteritems():\n if value:\n if key == 'transcripts':\n setattr(new_model, key, [ModelConverter.to_model(Transcript, t) for t in value])\n elif key == 'acts' and cls == Transcript:\n setattr(new_model, key, [ModelConverter.to_model(Act, a) for a in value])\n elif key == 'subtitles':\n setattr(new_model, key, [ModelConverter.to_model(Subtitle, s) for s in value])\n else:\n setattr(new_model, key, value)\n\n return new_model",
"def from_dict(cls, session: \"FigoSession\", data_dict: dict) -> \"ModelBase\":\n return cls(session, **data_dict)",
"def from_dict(cls, dikt: dict) -> 'DutyWhere':\n return util.deserialize_model(dikt, cls)",
"def get_storage_model(self) -> Dict[str, Any]:\n return self.data.dict()",
"def from_dict(cls, dikt) -> 'SmartSsd':\n return util.deserialize_model(dikt, cls)",
"def test_todictreturntype(self):\n b1 = BaseModel()\n self.assertEqual(type(b1.to_dict()), dict)",
"def from_dict(cls, dikt) -> 'InlineResponse201':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Body':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> \"InlineResponse201\":\n return util.deserialize_model(dikt, cls)",
"def MakeModel(self):\n pass",
"def convert_to_model(self, *args):",
"def model(self) -> Type[Model]:",
"def dict(self):\n return objToDict(self)",
"def get_model(self, key: str = None, **kwargs) -> Dict:\n raise NotImplementedError",
"def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)",
"def model() -> Model:\n return Model()",
"def to_model(self, obj):\n if obj is None:\n raise UnprocessableEntity(\"expected data in request, was empty\", what=BAD_VALUE)\n \n if not isinstance(obj, Mapping):\n raise UnprocessableEntity(\"expected data object in request\", what=BAD_VALUE)\n \n return {k: self.cols_to_model[k](v) for k, v in obj.items() if k in self.cols_to_model}",
"def from_dict(cls, dikt) -> 'ResultFeedback':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'InlineResponse200':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'AnalysisResultResults':\n return util.deserialize_model(dikt, cls)",
"def _get_dict_model(cls, key, model, spec):\n try:\n return model[key]\n except KeyError:\n raise ObjectNotFoundError(path=spec[\"full_path\"])",
"def test_basedict(self):\n tester = BaseModel()\n self.assertTrue(dict, type(tester.to_dict()))",
"def from_dict(cls, dikt) -> 'TripResponse':\n return util.deserialize_model(dikt, cls)",
"def model_dict(self) -> dict:\n model_dict = dict()\n for key, value in self.kwargs.items():\n current = model_dict\n if not isinstance(key, tuple):\n key = (key,)\n for part in key[:-1]:\n if part not in current:\n current[part] = dict()\n current = current[part]\n current[key[-1]] = value\n return model_dict",
"def from_dict(cls, dikt) -> 'Body':\n return deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'BundleData':\n return util.deserialize_model(dikt, cls)",
"def py_dict(t, m):\n class Model(BaseModel):\n f: t\n\n model = Model(f=m)\n\n return model.dict(by_alias=True)['f']",
"def model_to_instance(model):\n pass",
"def from_dict(cls, dikt) -> 'InlineResponse20011':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'RequestMnemonicModel':\n return util.deserialize_model(dikt, cls)",
"def serialize(model):\n # first we get the names of all the columns on your model\n columns = [c.key for c in class_mapper(model.__class__).columns]\n # then we return their values in a dict\n return dict((c, getattr(model, c)) for c in columns)",
"def serialize(model):\n # first we get the names of all the columns on your model\n columns = [c.key for c in class_mapper(model.__class__).columns]\n # then we return their values in a dict\n return dict((c, getattr(model, c)) for c in columns)",
"def serialize(model):\n # first we get the names of all the columns on your model\n columns = [c.key for c in class_mapper(model.__class__).columns]\n # then we return their values in a dict\n return dict((c, getattr(model, c)) for c in columns)",
"def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data",
"def test_create_from_dict(self):\n b1 = BaseModel()\n b1.name = \"Holberton\"\n b1.my_number = 89\n my_model_json = b1.to_dict()\n b2 = BaseModel(**my_model_json)\n self.assertEqual(b1.my_number, b2.my_number)\n self.assertEqual(b1.id, b2.id)\n self.assertEqual(b1.name, b2.name)\n self.assertEqual(b1.created_at, b2.created_at)\n self.assertEqual(b1.updated_at, b2.updated_at)\n self.assertNotEqual(b1, b2)",
"def load_model(self) -> Any:",
"def convert_to_pydantic(dict_object: Dict) -> MessageModel:\n return MessageModel.parse_obj(dict_object)",
"def convert_to_pydantic(dict_object: Dict) -> MessageModel:\n return MessageModel.parse_obj(dict_object)",
"def from_dict(cls, dikt) -> 'Espacio':\n return deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Raspi':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'ModelDeploymentConfig':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Neo4jGraphData':\n return util.deserialize_model(dikt, cls)",
"def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data",
"def from_dict(cls, dikt) -> 'Spacecraft':\n return util.deserialize_model(dikt, cls)",
"def to_payload(self, model):\n return model",
"def from_dict(cls, dikt: dict) -> 'User':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'VotingUser':\n return util.deserialize_model(dikt, cls)",
"def to_dict(self) -> Dict[str, Union[str, Number, dict, list]]:\n model = dict()\n model[\"name\"] = self.get_model_name()\n model[\"allocation_paradigm\"] = self.allocation_paradigm.name\n model[\"cpu_count\"] = self.cpu_count\n model[\"time_range\"] = self.time_range.to_dict()\n model[\"hydrofabric_data_id\"] = self.hydrofabric_data_id\n model[\"hydrofabric_uid\"] = self.hydrofabric_uid\n model[\"config_data_id\"] = self.config_data_id\n model[\"bmi_config_data_id\"] = self._bmi_config_data_id\n if self.catchments is not None:\n model[\"catchments\"] = self.catchments\n if self.partition_cfg_data_id is not None:\n model[\"partition_config_data_id\"] = self.partition_cfg_data_id\n\n return {\"model\": model, \"session-secret\": self.session_secret}",
"def build_simple_model(self, razor_json):\n \n # loop through all the nodes that were returned and take the simple info from them\n for response in razor_json['response']:\n model = {'name': response['@name'],\n 'root_password': response['@root_password'],\n 'current_state': response['@current_state'],\n 'uuid': response['@uuid'],\n 'label': response['@label']\n }\n\n return model",
"def load_model():\n with open(MODEL_SAVE_JSON, 'r') as fp:\n json_string = fp.read()\n model = model_from_json(json_string)\n return model",
"def from_dict(cls, dikt) -> 'Parameters':\n return util.deserialize_model(dikt, cls)",
"def model(self, key, model_type:T, default=undefined, description=None, **kwargs) -> T:\n return self._process(key, description=description, default=default, cast=cast_pydantic(model_type),type=model_type, **kwargs)",
"def get(self) -> dict:\n s3d_model = clone(self.__dict__)\n\n for k, v in s3d_model.items():\n if (has_get_method(v)):\n s3d_model[k] = v.get()\n\n return s3d_model",
"def from_dict(cls, dikt) -> \"SapDataIm\":\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Story':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'JWKS':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'RecipeObjectNutrients':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'OrgApacheSlingModelsImplModelAdapterFactoryProperties':\n return util.deserialize_model(dikt, cls)",
"def _get_latest_model(cls, model, spec):\n if hasattr(model, \"KEY\") and model.KEY is not None:\n spec[\"content\"] = model\n model = cls\n elif hasattr(model, \"STRUCT\"):\n spec[\"content\"] = model.STRUCT\n else:\n # Is a dict\n spec[\"content\"] = model\n spec[\"object\"] = model\n return model",
"def from_dict(cls, dikt) -> 'GnbrEntity':\n return util.deserialize_model(dikt, cls)",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()",
"def _to_dict(self):\n return self.to_dict()"
] |
[
"0.6939894",
"0.68444926",
"0.67725724",
"0.67725724",
"0.67725724",
"0.67725724",
"0.67725724",
"0.67725724",
"0.6739522",
"0.6698291",
"0.6698291",
"0.66464084",
"0.66383654",
"0.66022646",
"0.660082",
"0.65999925",
"0.6593835",
"0.6580194",
"0.65621036",
"0.64150435",
"0.64147764",
"0.64146376",
"0.6394627",
"0.6360003",
"0.63511944",
"0.6348897",
"0.63345134",
"0.6328747",
"0.63221914",
"0.6297076",
"0.6284401",
"0.62781453",
"0.6261736",
"0.6239387",
"0.6238494",
"0.62338966",
"0.622121",
"0.62197393",
"0.6215802",
"0.6213947",
"0.62106633",
"0.6204965",
"0.6204309",
"0.61897135",
"0.61766773",
"0.6168027",
"0.61655724",
"0.6148202",
"0.6148202",
"0.6148202",
"0.61430305",
"0.6142995",
"0.61416256",
"0.61302376",
"0.61139095",
"0.611159",
"0.6106887",
"0.61026585",
"0.60949785",
"0.6090516",
"0.60894674",
"0.6087819",
"0.6087819",
"0.6087819",
"0.6084278",
"0.6083178",
"0.6068139",
"0.606291",
"0.606291",
"0.60623866",
"0.60559183",
"0.60459113",
"0.6036312",
"0.60295975",
"0.60277486",
"0.60266393",
"0.6012228",
"0.6000146",
"0.5995877",
"0.59837526",
"0.5976731",
"0.5969706",
"0.5963076",
"0.5958296",
"0.59500295",
"0.5945975",
"0.59378654",
"0.5933584",
"0.59314626",
"0.5921435",
"0.59211457",
"0.5917938",
"0.5917938",
"0.5917938",
"0.5917938",
"0.5917938",
"0.5917938",
"0.5917938",
"0.5917938",
"0.5917938"
] |
0.59424317
|
86
|
Gets the email of this UserBase. The Zulip API email address of the user or bot. If you do not have permission to view the email address of the target user, this will be a fake email address that is usable for the Zulip API but nothing else.
|
def email(self):
return self._email
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')",
"def get_email(self):\n return self._email",
"def get_email(self):\n return self.email",
"def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")",
"def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]",
"def getEmail(self):\n return self.__email",
"def getEmail(self):\n return self.email",
"def email(self, instance):\r\n return instance.user.email",
"def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''",
"def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email",
"def log_useremail(self):\n return self.user.email",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def getEmail(self):\n\t\treturn self.Email",
"def email(self):\n return self._dict.get('email')",
"def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")",
"def email(self) -> str:\n return self._email",
"def email_address(self) -> str:\n return self._email_address",
"def get_email(obj):\r\n return obj.user.email",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)",
"def _get_user_email_address(self, request):\n return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)",
"def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")",
"def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")",
"def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")",
"def Email(self, default=None):\n return self.data.get('email', default)",
"def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")",
"def customer_email(self):\n return self._customer_email",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def getEmail(self):\n return _libsbml.ModelCreator_getEmail(self)",
"def email(self):\n return self.__email",
"def get_email_of_user(auth0_id):\n _verify_auth0_id(auth0_id)\n return _get_email_of_user(\n auth0_id, token_redis_connection(), auth0_token(),\n current_app.config)",
"def service_account_email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_account_email_address\")",
"def get_user_email():\n if not is_authenticated() or not is_authenticated_CSC_user() or 'samlUserdata' not in session:\n return None\n\n csc_email = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('email', None), False)\n\n return csc_email[0] if csc_email else not_found('csc_email')\n return None",
"def business_owner_email(self):\n return self._business_owner_email",
"def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")",
"def service_account_email_address(self) -> str:\n return pulumi.get(self, \"service_account_email_address\")",
"def business_email(self):\n return self._business_email",
"def get_email(self, token, uid):\n\n email_info_resp = get_remote(get_config('login.weibo.email_info_url') + token)\n email_info_resp_json = json.loads(email_info_resp)\n\n if email_info_resp_json.get(\"error\") is not None:\n raise Exception(email_info_resp_json)\n\n return email_info_resp_json['email']",
"def get_email(self, id_):\n\n query = self._db.User.select(self._db.User.c.id_ == id_)\n query = query.with_only_columns([self._db.User.c.email, ])\n\n record = query.execute().fetchone()\n return record[0]",
"def management_account_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_email\")",
"def get_email_for_nickname(cls, nickname):\n account = cls.get_account_for_nickname(nickname)\n if account is None:\n return None\n return account.email",
"def get_assignee_email(self, assignee_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, assignee_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"email\"]",
"def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)",
"def get_email(khoros_object, user_settings=None, user_id=None, login=None, first_name=None, last_name=None,\n allow_multiple=False, display_warnings=True):\n user_settings = process_user_settings(user_settings, user_id=user_id, login=login,\n first_name=first_name, last_name=last_name)\n where_clause = _get_where_clause_for_email(user_settings)\n return _get_user_identifier(khoros_object, 'email', where_clause, allow_multiple, display_warnings)",
"def service_account_email_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_account_email_address\")",
"def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None",
"def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)",
"def get_email():\n headers = request.headers\n token = headers['Authorization'].split()[1]\n return Token.objects(access_token=token).first().email",
"def email(self, login_failures):\n return login_failures.user.email",
"def get_user_email(username):\r\n return '{0}@test.com'.format(username)",
"def get_default_email(self):\n email = 'error@error.error'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email",
"def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")",
"def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")",
"def get_email_address(user_id: UserID) -> str:\n email_address = db.session \\\n .query(DbUser.email_address) \\\n .filter_by(id=user_id) \\\n .scalar()\n\n if email_address is None:\n raise ValueError(\n f\"Unknown user ID '{user_id}' or user has no email address\"\n )\n\n return email_address",
"def get_my_email():\n return check_output(['git', 'config', '--get',\n 'user.email']).strip().decode('utf-8')",
"def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")",
"def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address",
"def get_user(self):\n if not self.is_valid():\n return None\n # error checking done in: clean_email\n # NOTE: all emails are stored in lower-case\n e = self.clean_email().lower()\n return User.objects.get(email=e)",
"def technical_owner_email(self):\n return self._technical_owner_email",
"def customer_email(customer):\n return customer.get(\"email\")",
"def delivery_email(self):\n return self._delivery_email",
"def ___str__(self):\n return self.email",
"def ldap_get_email(self, user):\n result = super(Auth42, self)._search_not_empty(user)\n if result is not None:\n alias = result.get(\"alias\")[1]\n return alias\n\n return None",
"def get_current_user_emails(self):\n user_service = self.runtime.service(self, 'user')\n xb_user = user_service.get_current_user()\n\n return xb_user.emails",
"def __str__(self):\n return self.user.email",
"def elastic_cloud_email_address(self) -> str:\n return pulumi.get(self, \"elastic_cloud_email_address\")",
"def get_full_name(self):\n\t\treturn self.email",
"def displayname(self):\n return self.email",
"def get_member_email(username=None, portal_membership=None):\n\n if portal_membership is None:\n portal = getSite()\n portal_membership = getToolByName(portal, 'portal_membership', None)\n if portal_membership is None:\n # unit test or non-CMF site\n return None\n\n if username is None:\n member = portal_membership.getAuthenticatedMember()\n else:\n member = portal_membership.getMemberById(username)\n if member is None:\n if username is not None and '@' in username:\n # Use case: explicitly adding a mailing list address\n # to the watchers.\n return username\n return None\n\n try:\n email = member.getProperty('email')\n except Unauthorized:\n # this will happen if CMFMember is installed and the email\n # property is protected via AT security\n email = member.getField('email').getAccessor(member)()\n return email",
"def get_email_hash(self):\n return sha256(self.email).hexdigest()",
"def get_short_name(self):\n # The user is identified by the email address\n return self.email",
"def _get_contact_email(app):\n return app[CONTACT_EMAIL_KEY]",
"def get_name(self):\n return self.user.username if self.user.username else self.user.email",
"def __email_for_user(self, username):\n user_3tuple = self.usermap.lookup_by_p4user(username)\n if not user_3tuple:\n return _('Unknown Perforce User <{}>').format(username)\n return \"<{0}>\".format(user_3tuple[p4gf_usermap.TUPLE_INDEX_EMAIL])",
"def notification_sender_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_sender_email\")",
"def clean_email(self):\n if getattr(self.instance, 'email', None):\n raise ValidationError(self.registered_error)\n return self.cleaned_data['email']",
"def get_id(self):\n return self.email",
"def get_id(self):\n return self.email",
"def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None",
"def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None",
"def get_change_email(self):\n return self.client.get(self.change_email_url)",
"def user(self) -> str:\n return self._user",
"def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")",
"def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")",
"def getToUser(self):\n return self.toUser",
"def billing_contact_user(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"billing_contact_user\")"
] |
[
"0.8533475",
"0.80256367",
"0.80006677",
"0.7957389",
"0.78827",
"0.7873399",
"0.78575724",
"0.7735007",
"0.76502186",
"0.76435256",
"0.7608377",
"0.76025885",
"0.76025885",
"0.76025885",
"0.75683004",
"0.75669813",
"0.75593585",
"0.7512967",
"0.74382466",
"0.7384822",
"0.7312271",
"0.7312271",
"0.7312271",
"0.7312271",
"0.7233763",
"0.7229699",
"0.7174858",
"0.71594155",
"0.7156566",
"0.70793265",
"0.6989799",
"0.69523394",
"0.69245017",
"0.69245017",
"0.69245017",
"0.69245017",
"0.69245017",
"0.69245017",
"0.69245017",
"0.6898748",
"0.68785936",
"0.68770176",
"0.68405765",
"0.6837404",
"0.6834967",
"0.68113446",
"0.6810365",
"0.67769766",
"0.6764068",
"0.676307",
"0.6714461",
"0.67085344",
"0.6699093",
"0.66935426",
"0.6672536",
"0.6646794",
"0.6548748",
"0.649097",
"0.64615875",
"0.6444525",
"0.6379141",
"0.6352307",
"0.6333458",
"0.6333458",
"0.6310952",
"0.63084936",
"0.62979865",
"0.62620234",
"0.62556064",
"0.62415844",
"0.61944616",
"0.61458385",
"0.6145138",
"0.6140332",
"0.6125832",
"0.6122474",
"0.61168283",
"0.6093689",
"0.60930336",
"0.6072708",
"0.607085",
"0.6069905",
"0.6058267",
"0.6038827",
"0.59858316",
"0.5980349",
"0.59592646",
"0.59563583",
"0.59563583",
"0.59426284",
"0.59426284",
"0.5916634",
"0.58956194",
"0.5881259",
"0.5881259",
"0.58779526",
"0.5876367"
] |
0.76262873
|
12
|
Sets the email of this UserBase. The Zulip API email address of the user or bot. If you do not have permission to view the email address of the target user, this will be a fake email address that is usable for the Zulip API but nothing else.
|
def email(self, email):
self._email = email
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setEmail(self, email):\n self.email = email\n return self",
"def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email",
"def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 64):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `64`\") # noqa: E501\n\n self._email = email",
"def email(self, email: str):\n\n self._email = email",
"def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)",
"def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email",
"def user_profile_setemail(token, email):\n users = database.get_users()\n for user in users:\n if user['email'] is email:\n raise error.InputError(description=\"This email is already taken\")\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['email'] = email\n database.set_user_data(user)",
"def email_address(self, email_address: \"str\"):\n self._attrs[\"emailAddress\"] = email_address",
"def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''",
"def contact_email(self, contact_email):\n\n self._contact_email = contact_email",
"def contact_email(self, contact_email):\n\n self._contact_email = contact_email",
"def get_email(self):\n return self.email",
"def admin_email(self, admin_email):\n\n self._admin_email = admin_email",
"def email(self, instance):\r\n return instance.user.email",
"def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')",
"def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email",
"def email(self) -> str:\n return self._email",
"def getEmail(self):\n return self.__email",
"async def change_email(self, new_email, password):\n data = {\"password\": password, \"emailAddress\": new_email}\n e = await self.request.request(url='https://accountsettings.roblox.com/v1/email', method='post', data=data)\n return e",
"def getEmail(self):\n return self.email",
"def get_email(self):\n return self._email",
"def email_address(self) -> str:\n return self._email_address",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def updateEmail(self, newEmail, password):\n\t\turl = \"https://habitica.com/api/v3/user/auth/update-email\"\n\t\tpayload = {\"newEmail\": newEmail, \"password\": password}\n\t\treturn(putUrl(url, self.credentials, payload))",
"def update_user_email(self, user_id, email, realname):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None:\n raise Exception(\"Unexpected empty object: user_id.\")\n if len(email) == 0:\n raise Exception(\"Email address not provided.\")\n if len(realname) == 0:\n raise Exception(\"Name not provided.\")\n\n if not self.database.update_user(user_id, email, realname, None):\n raise Exception(\"An internal error was encountered when updating the user.\")\n return True",
"def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email",
"def customer_email(self, customer_email):\n self._customer_email = customer_email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def Email(self, default=None):\n return self.data.get('email', default)",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email_address(self, email_address: str):\n if email_address is None:\n raise ValueError(\"Invalid value for `email_address`, must not be `None`\") # noqa: E501\n\n self._email_address = email_address",
"def getEmail(self):\n\t\treturn self.Email",
"def set_email(net_id, email):\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET email='\"+email+\"' WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()",
"def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")",
"def test_user_profile_setemail(url):\n test_clear(url)\n admin_tk, admin_id = channel_user_create_0(url)\n\n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == 'z5555555@gmail.com'\n assert profile_resp['user']['name_first'] == 'admin'\n assert profile_resp['user']['name_last'] == 'admin'\n\n test_profile_setemail = {\n 'token': admin_tk,\n 'email': 'fake@gmail.com'\n }\n requests.put(url + \"user/profile/setemail\", json=test_profile_setemail)\n \n test_profile = {\n 'token': admin_tk,\n 'u_id': admin_id\n }\n resp = requests.get(url + \"user/profile\", params=test_profile)\n profile_resp = resp.json()\n assert profile_resp['user']['u_id'] == admin_id\n assert profile_resp['user']['email'] == 'fake@gmail.com'\n assert profile_resp['user']['name_first'] == 'admin'\n assert profile_resp['user']['name_last'] == 'admin'",
"def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)",
"def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")",
"def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")",
"def _set_user_email_address(self, request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email']\n else:\n return form",
"def email(self):\n return self._dict.get('email')",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email",
"def technical_owner_email(self, technical_owner_email):\n\n self._technical_owner_email = technical_owner_email",
"def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)",
"def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]",
"def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")",
"def business_email(self, business_email):\n\n self._business_email = business_email",
"def SetCurrentUser(self, email, user_id='123456', is_admin=False):\n email = email or ''\n user_id = user_id or ''\n is_admin = '1' if is_admin else '0'\n self.testbed.setup_env(user_is_admin=is_admin,\n user_email=email,\n user_id=user_id,\n overwrite=True)",
"def log_useremail(self):\n return self.user.email",
"def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email",
"def validate_email(self, value):\n if not value:\n raise serializers.ValidationError(\"Email cannot be null\")\n return value",
"def set_email_notification(self, hit_type, email, event_types=None):\r\n return self._set_notification(hit_type, 'Email', email, event_types)",
"def email(self):\n return self.__email",
"def client_email(self, client_email):\n\n self._client_email = client_email",
"def _set_campaign_email(self, campaign_email):\n if isinstance(campaign_email, str):\n campaign_email = TrackedCampaignEmail.objects.create(\n campaign=self.campaign, name=campaign_email\n )\n\n campaign_email.save()\n\n self.campaign_email = campaign_email",
"def get_email(obj):\r\n return obj.user.email",
"def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")",
"def email(self):\n return sa.Column(sa.Unicode(100), nullable=False, unique=True)",
"def _get_user_email_address(self, request):\n return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)",
"def create_user_email(user):\n if not user.is_authenticated:\n return False\n \n user.email = \"%s@%s\" % (user.username, settings.DEFAULT_EMAIL_HOST)\n user.save()\n \n return user.email",
"def from_email_address(self, val: EmailAddress):\n self._from_email = val",
"def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key",
"def employer_email(self, employer_email):\n if employer_email is not None and len(employer_email) > 1024:\n raise ValueError(\"Invalid value for `employer_email`, length must be less than or equal to `1024`\") # noqa: E501\n\n self._employer_email = employer_email",
"def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")",
"def clean_email(self):\n try:\n user = User.objects.get(email__exact=self.cleaned_data['email'])\n except User.DoesNotExist:\n return self.cleaned_data['email']\n raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))",
"def set_email_para(self,\n email_dict):\n\n self.__email_flag__ = 1\n\n # email\n self.__email_host__ = email_dict[\"email_host\"]\n self.__email_receiver_list__ = email_dict[\"email_recv_list\"]\n self.__email_sender__ = email_dict[\"email_sender_mailbox\"]\n self.__email_user__ = email_dict[\"email_username\"]\n self.__email_passwd__ = email_dict[\"email_password\"]\n\n print(\"NotifyManager email host=%s\"\n % self.__email_host__)\n print(\"NotifyManager email sender mailbox=%s\"\n % self.__email_sender__)\n print(\"NotifyManager email receiver mailbox=%s\"\n % self.__email_receiver_list__)\n\n return",
"def save(self, *args, **kwargs):\n self.username = self.username or self.email\n super().save(*args, **kwargs)",
"def service_account_email_address(self) -> str:\n return pulumi.get(self, \"service_account_email_address\")",
"def email(self, email_id):\r\n return emails.Email(self, email_id)",
"def service_account_email_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_account_email_address\")",
"def customer_email(self):\n return self._customer_email",
"def test_email_is_optional(self):\n self.updated_data['email'] = ''\n self.update_user()\n self.assertEqual(self.user.email, self.updated_data['email'])",
"def set(self, **kwargs: Any) -> None: # nosec\n attributes = {}\n user_id: int = int(kwargs[\"user_id\"])\n user = self.first(id_int=user_id)\n\n for k, v in kwargs.items():\n if k in user.__attr_searchable__:\n attributes[k] = v\n\n if kwargs.get(\"email\", None):\n user.email = kwargs[\"email\"]\n elif kwargs.get(\"role\", None):\n user.role = kwargs[\"role\"]\n elif kwargs.get(\"name\", None):\n user.name = kwargs[\"name\"]\n elif kwargs.get(\"budget\", None):\n user.budget = kwargs[\"budget\"]\n elif kwargs.get(\"website\", None):\n user.website = kwargs[\"website\"]\n elif kwargs.get(\"institution\", None):\n user.institution = kwargs[\"institution\"]\n else:\n raise Exception\n\n attributes[\"__blob__\"] = _serialize(user, to_bytes=True)\n\n self.update_one(query={\"id_int\": user_id}, values=attributes)"
] |
[
"0.7745995",
"0.734506",
"0.7238552",
"0.71991414",
"0.70291823",
"0.69148123",
"0.67069113",
"0.66328615",
"0.66016483",
"0.65045893",
"0.65045893",
"0.64563024",
"0.6400238",
"0.6398678",
"0.63835156",
"0.6366219",
"0.6354441",
"0.6354117",
"0.6353652",
"0.6346813",
"0.63368684",
"0.6330958",
"0.63237834",
"0.63237834",
"0.63237834",
"0.6287133",
"0.6276219",
"0.62267196",
"0.6217892",
"0.62111086",
"0.62111086",
"0.62111086",
"0.62111086",
"0.61917096",
"0.6180604",
"0.6180604",
"0.6180604",
"0.6180604",
"0.61626333",
"0.61151326",
"0.60969853",
"0.60896885",
"0.6076968",
"0.6076968",
"0.6076968",
"0.6076319",
"0.60705084",
"0.6062344",
"0.60620564",
"0.60455084",
"0.6021097",
"0.6004122",
"0.5988599",
"0.5988599",
"0.5988599",
"0.5988599",
"0.5988599",
"0.5988599",
"0.5988599",
"0.59810287",
"0.59701294",
"0.5948885",
"0.59405345",
"0.5921601",
"0.58893305",
"0.5883363",
"0.5882786",
"0.5855642",
"0.5828899",
"0.58004665",
"0.579265",
"0.57872546",
"0.5757175",
"0.5744486",
"0.57336146",
"0.5731904",
"0.572697",
"0.57038224",
"0.5670801",
"0.5649051",
"0.56263334",
"0.5608447",
"0.55830383",
"0.55750465",
"0.55739367",
"0.556319",
"0.5542602",
"0.55043507",
"0.54958624",
"0.5494074",
"0.5484314"
] |
0.7170487
|
10
|
Gets the is_bot of this UserBase. A boolean specifying whether the user is a bot or full account.
|
def is_bot(self):
    return self._is_bot
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_bot(self) -> bool:\n if self._bot is not None:\n return hasattr(self, 'ubot')\n return bool(Config.BOT_TOKEN)",
"def is_bot(self) -> undefined.UndefinedOr[bool]:",
"def bot_type(self):\n return self._bot_type",
"def is_bot(self) -> bool:",
"def bot(self):\n return self._bot",
"def is_bot(self, is_bot):\n\n self._is_bot = is_bot",
"def is_active_user(self):\n\n return self.is_active",
"def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)",
"def isNew(self):\n bot = self.storage.find_one({\"user\": self.user_id})\n if not bot:\n return True\n return False",
"def is_bot(self, pid:int) -> bool:\n\t\treturn pid in self._agents",
"def is_default_bot_for_cog_svc_account(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_default_bot_for_cog_svc_account\")",
"def is_active(self):\n return self.user.is_active",
"def is_active(self):\n return self.status == ACTIVE_USER",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def is_bot(environ, is_bot_ua=is_bot_ua):\n if is_bot_ua(environ.get('HTTP_USER_AGENT', '')):\n return True\n if 'HTTP_ACCEPT_LANGUAGE' not in environ:\n return True\n return False",
"def is_logged_in(self):\n return self.__is_logged_in",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def is_superuser(self):\n return self.is_admin",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def is_usermanager(self):\n return False",
"def auth_enabled(self):\n\n return self._api_manager.auth_enabled()",
"def get_is_ai(self):\n return self.__is_ai",
"def logged_in(self):\n return self.user is not None",
"def is_turbo(self) -> bool:\n return self.turbo",
"def user_verified(self):\n return self.user.verified",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def get_all_bots(self):\n\t\ttry:\n\t\t\tconn \t\t\t = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\t\tconn.row_factory = sqlite3.Row\n\t\t\tc \t\t\t\t = conn.cursor()\n\t\t\tc.execute('SELECT * FROM bots')\n\t\t\tall_bots = c.fetchall()\n\t\t\treturn all_bots\t\t\t\t\t\t\t\t# list(all_bots) = [<sqlite3.Row object at 0x000001BB27302FD0>, <sqlite3.Row object at 0x000001BB27302CB0>,...]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t# [{pair:'LTCBTC', 'is_active'=True, ...}, {pair:'ETHBTC, 'is_active'=True, ...}]\n\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\treturn False",
"def get_isenabled(self):\n return self.isenabled",
"def get_has_password(self, user):\n return True if user.password else False",
"def IsCorpUserOrAdmin(self):\n user_email = auth_util.GetUserEmail()\n return ((user_email and user_email.endswith('@google.com')) or\n auth_util.IsCurrentUserAdmin())",
"def isHumanPresence(self):\n\t\treturn self.humanPresence",
"def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def is_staff(self) -> bool:\n return self.is_admin",
"def is_bool(self):\n answer = self._call('is_bool')\n return answer.yes",
"def bots(self) -> Generator[discord.User, None, None]:\n for user in self.users:\n if user.bot:\n yield user",
"def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False",
"def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0",
"def is_admin(self):\n return self.admin",
"def is_staff_user(self):\n\n return self.is_staff",
"def as_bool(self):\n return self.as_type(bool)",
"def is_admin(self) -> bool:\n return self._is_admin",
"def is_personal(self):\n return self.user_id is not None",
"def is_user_is_owner(self):\n return self._tag == 'user_is_owner'",
"def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False",
"def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active",
"def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None",
"def is_enabled(self):\n siteconfig = SiteConfiguration.objects.get_current()\n return siteconfig.get('%s_enabled' % self.backend_id, False)",
"def is_staff(self):\n return self.is_admin",
"def is_staff(self):\n return self.is_admin",
"def logged_in(self) -> bool:\n return self._logged_in",
"def is_admin(self):\n return self._is_admin",
"def is_admin(self):\n return self._is_admin",
"def get_on_tunnel(self):\n return self._is_on_tunnel",
"def isMuted(self):\n return self._isMuted",
"def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'",
"def is_on(self):\n return self._data[\"enabled\"]",
"def is_admin(self):\r\n return self.admin",
"def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False",
"def bot_owner_id(self):\n return self._bot_owner_id",
"def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)",
"def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False",
"def is_moderator(self):\n return self.user_type == 'M'",
"def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True",
"def is_enabled(self):\n return self.enabled",
"def getfirstbot(self):\n\n return self.bots[0]",
"def isBoolean(self):\n return _libsbml.ASTNode_isBoolean(self)",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def has_user(self, user, allow_superusers=True):\n return self.has_student(user, allow_superusers) or self.has_ta(user, False) or self.has_instructor(user, False)",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def has_token(self):\n user_id = getattr(self, '_id', None)\n user_token = getattr(self, 'token', None)\n if user_id is not None and user_token is not None:\n return True\n return False",
"def online(self):\n api_call = self.presence()\n if api_call.get('ok'):\n # retrieve all users so we can find our bot\n return api_call.get('online')\n return None",
"def is_staff(self):\r\n return self.is_admin",
"def is_authenticated(self):\n return bool(get_auth_token())",
"def is_active(self):\n with self._lock:\n return self._robot is not None",
"def is_profile_complete(self):\n return bool(self.fullname and self.username and self.email)",
"def is_bool(self):\n validator = self.__class__.get_setting_validator(self.key, **self.get_kwargs())\n\n return self.__class__.validator_is_bool(validator)",
"def is_user_player(self, user):\n return self.user == user",
"def is_hero(self):\n return True",
"def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )",
"def is_user_admin(self, user):\n return user == self.created_by",
"def is_active(self) -> bool:\n return self.__is_active",
"def bot(self, id):\n json = self.skype.conn(\"GET\", \"{0}/agents\".format(SkypeConnection.API_BOT), params={\"agentId\": id},\n auth=SkypeConnection.Auth.SkypeToken).json().get(\"agentDescriptions\", [])\n return self.merge(SkypeBotUser.fromRaw(self.skype, json[0])) if json else None",
"def get_is_self(self, obj: Profile) -> bool:\n request: HttpRequest = self.context.get('request')\n if request:\n if request.user.is_authenticated:\n return obj == request.user.profile\n return False",
"def is_collaborator(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('GET', url), resource.parse_boolean",
"def is_staff(self):\n\t\treturn self.is_admin",
"def is_usermanager(self):\n return self.can(Permission.CRUD_USERS)",
"def logged_in(self):\n return self._token is not None",
"def get_is_portal_enabled(self):\n return self.is_portal_enabled",
"def logged_in(self):\n return self.auth.get_user_by_session() is not None",
"def verify_user(self):\n verified = False\n if self.user.role.role_name == \"Admin\":\n verified = True\n\n return verified",
"def is_mine(self) -> bool:\n return self.proto.alliance == ALLIANCE.Self.value",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def is_polyphony_user(allow_mods: bool = False):\n # TODO: Add error message that self deletes\n async def predicate(ctx: commands.context):\n user = get_user(ctx.author.id)\n is_mod = False\n if allow_mods:\n is_mod = any(\n role.name in MODERATOR_ROLES for role in ctx.message.author.roles\n )\n if is_mod or user is not None:\n return True\n else:\n await ctx.send(\n f\"Sorry {ctx.message.author.mention}. You are not a Polyphony user. Contact a moderator if you believe this is a mistake.\",\n delete_after=10,\n )\n return False\n\n return commands.check(predicate)"
] |
[
"0.774403",
"0.6506757",
"0.6466639",
"0.63527393",
"0.6351612",
"0.59704244",
"0.5953491",
"0.5796303",
"0.5599795",
"0.55755657",
"0.55704886",
"0.5508024",
"0.5390804",
"0.5384107",
"0.5262253",
"0.52469635",
"0.52303284",
"0.522477",
"0.5224286",
"0.5201915",
"0.5175014",
"0.5174314",
"0.51557016",
"0.5155061",
"0.5142407",
"0.51405805",
"0.5131339",
"0.5123091",
"0.5119839",
"0.5115879",
"0.51093745",
"0.5084982",
"0.5076259",
"0.5075422",
"0.5075422",
"0.5075094",
"0.5045417",
"0.5043217",
"0.50386703",
"0.50248265",
"0.5008166",
"0.49953672",
"0.49926674",
"0.4989222",
"0.4980266",
"0.49583024",
"0.4952507",
"0.49470836",
"0.4941613",
"0.4935096",
"0.49290204",
"0.49290204",
"0.4913776",
"0.488981",
"0.488981",
"0.4887253",
"0.48863414",
"0.488113",
"0.48767424",
"0.48762926",
"0.48728848",
"0.4869542",
"0.48693043",
"0.4867789",
"0.4860254",
"0.48559234",
"0.4841959",
"0.4837415",
"0.4833361",
"0.48275036",
"0.48275036",
"0.48275036",
"0.4825095",
"0.48218152",
"0.48174867",
"0.48165482",
"0.4805401",
"0.48032412",
"0.47977257",
"0.47882",
"0.4779854",
"0.47787145",
"0.47737947",
"0.4772406",
"0.476896",
"0.47596893",
"0.47589615",
"0.47587892",
"0.47558683",
"0.47514856",
"0.47467384",
"0.47412863",
"0.47366253",
"0.47337842",
"0.4730961",
"0.4730856",
"0.47283322",
"0.47260216",
"0.47260216",
"0.4724839"
] |
0.7682634
|
1
|
Sets the is_bot of this UserBase. A boolean specifying whether the user is a bot or full account.
|
def is_bot(self, is_bot):
    self._is_bot = is_bot
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_bot(self):\n return self._is_bot",
"def is_bot(self) -> bool:\n if self._bot is not None:\n return hasattr(self, 'ubot')\n return bool(Config.BOT_TOKEN)",
"def bot_type(self, bot_type):\n\n self._bot_type = bot_type",
"def set_is_ai(self, is_ai):\n self.__is_ai = is_ai",
"def is_bot(self) -> undefined.UndefinedOr[bool]:",
"def is_bot(self) -> bool:",
"def bot_type(self):\n return self._bot_type",
"def is_business(self, is_business):\n\n self._is_business = is_business",
"def add_bot(self, bot):\n self.add_user(bot)",
"def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active",
"async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username",
"def is_admin(self, is_admin):\n\n self._is_admin = is_admin",
"def is_default_bot_for_cog_svc_account(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_default_bot_for_cog_svc_account\")",
"def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin",
"def ban_user(self, session, chat_id: int) -> None:\n\n user = session.query(User).get(chat_id)\n if user and user.is_banned is False:\n user.is_banned = True\n session.commit()",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def bot(self):\n return self._bot",
"def boolean(self, boolean):\n\n self._boolean = boolean",
"def is_owner(self, is_owner):\n\n self._is_owner = is_owner",
"async def set_chat(self, args):\n value = args if isinstance(args, bool) else args.lower() in ('yes', 'true', '1')\n if self.chat == value:\n return\n self.chat = value\n if self.chat_message is not None:\n await self.delete_message(self.chat_message)\n await self.set_trigger('chat_init', None)\n await self.set_trigger('chat', None)\n tag = 'chat' if self.chat else 'chat_init'\n self.chat_message = await self.send_tag(tag, emoji.TRIGGERS[tag], 'Chat enabled' if self.chat else 'Chat muted')\n if not self.chat:\n await self.shell_terminate_all(self.shell_chat)",
"def set_chatbot(self, chatbot):\n super(MultiLogicAdapter, self).set_chatbot(chatbot)\n\n for adapter in self.adapters:\n adapter.set_chatbot(chatbot)",
"def check_if_bot(self, user_id):\n return str(self.get_int_index(bot_id, 9)) in str(user_id)",
"def set(self, boolean):\n self._val = boolean",
"def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)",
"def is_bot(self, pid:int) -> bool:\n\t\treturn pid in self._agents",
"def is_user_event(self, is_user_event):\n self._is_user_event = is_user_event",
"def setBot(self, present, dir=-1):\n if dir == -1:\n # print(\"Bot set present at current node: \" + str(present))\n self.botPresent = present\n else:\n self.dirNodes[dir].botPresent = present",
"def set_is_default_org(self, is_default_org):\n self.is_default_org = is_default_org",
"def is_voice_roaming_enabled(self, is_voice_roaming_enabled):\n\n self._is_voice_roaming_enabled = is_voice_roaming_enabled",
"def set_bool_attribute(self, id: str, b: Optional[bool]):\n self.set_attribute(id, None if not b else ConstInt(1))",
"def set_has_fan(self, value: bool = True):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"has_fan\", value))\r\n self._has_fan = value",
"def set_is_max(self, is_max):\n self.__is_max = is_max",
"def _setForBinding (self, value):\n if not isinstance(value, bool):\n raise TypeError(value)\n self.__forBinding = value\n return value",
"async def async_set_is_master(self, is_master):\n self._is_master = is_master",
"def is_muted(self, is_muted):\n # type: (bool) -> None\n\n if is_muted is not None:\n if not isinstance(is_muted, bool):\n raise TypeError(\"Invalid type for `is_muted`, type has to be `bool`\")\n\n self._is_muted = is_muted",
"def set_builder_bot(self, builder_bot): \n self.builder_bot = builder_bot # pragma: no cover",
"async def set_mute(self, value: bool):\n await self._pytheos.api.player.set_mute(self.id, value)",
"def set_user(self, user):\r\n self.user = user",
"def unban_user(self, session, chat_id: int) -> None:\n\n user = session.query(User).get(chat_id)\n if user.is_banned is True:\n user.is_banned = False\n session.commit()",
"def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)",
"def bots(self) -> Generator[discord.User, None, None]:\n for user in self.users:\n if user.bot:\n yield user",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def set_as_type_user(self):\n self.type = MessageTypes.USER",
"def set_user(self, user: User):\n self.__user = user",
"def parse_bot(self) -> None:\n if not self.skip_bot_detection and not self.bot:\n self.bot = Bot(\n self.user_agent,\n self.ua_hash,\n self.ua_spaceless,\n self.VERSION_TRUNCATION,\n ).parse()\n self.all_details['bot'] = self.bot.ua_data",
"def set_visible(self, is_visible):\n self._data['is_visible'] = 1 if is_visible else 0",
"def is_staff(self) -> bool:\n return self.is_admin",
"def is_staff_user(self):\n\n return self.is_staff",
"def is_active_user(self):\n\n return self.is_active",
"async def async_set_turbo(self, turbo, state_mode):\n if turbo not in ON_OFF_LIST:\n return\n self._turbo = turbo.lower()\n self._state_mode = state_mode\n await self.async_send_cmd()",
"def is_active(self, is_active: bool):\n if is_active is None:\n raise ValueError(\"Invalid value for `is_active`, must not be `None`\")\n\n self._is_active = is_active",
"def is_taxable(self, is_taxable: bool):\n if is_taxable is None:\n raise ValueError(\"Invalid value for `is_taxable`, must not be `None`\") # noqa: E501\n\n self._is_taxable = is_taxable",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def is_bot(environ, is_bot_ua=is_bot_ua):\n if is_bot_ua(environ.get('HTTP_USER_AGENT', '')):\n return True\n if 'HTTP_ACCEPT_LANGUAGE' not in environ:\n return True\n return False",
"def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False",
"def is_guest(self, is_guest):\n\n self._is_guest = is_guest",
"def is_active(self, is_active):\n\n self._is_active = is_active",
"def is_active(self, is_active):\n\n self._is_active = is_active",
"def is_active(self, is_active):\n\n self._is_active = is_active",
"def is_active(self, is_active):\n\n self._is_active = is_active",
"def is_usermanager(self):\n return False",
"async def musicbot(self, ctx, the_state):\r\n is_mod = False\r\n for role in ctx.message.author.roles:\r\n if role.name == \"Moderators\":\r\n is_mod = True\r\n if is_mod:\r\n if the_state == \"1\":\r\n self.music_off = False\r\n await ctx.send(\"Music Bot features now on\")\r\n else:\r\n self.music_off = True\r\n await ctx.send(\"Music Bot features now off\")\r\n else:\r\n await ctx.send(\"**Error:** You are not allowed to use this command!\")",
"def is_active(self, is_active):\n \n self._is_active = is_active",
"def set_is_staff(self, role):\n self.is_staff = (role != User.ROLE_USER)",
"def is_user_allowed(self, user):\n return user.is_staff",
"def set_is_portal_enabled(self, is_portal_enabled):\n self.is_portal_enabled = is_portal_enabled",
"def ban_user(cls, user):\n\n banned = False\n with transaction.atomic():\n banned = True if cls.ban_phone_number(user.mobile_phone) else banned\n\n if user.add_mobile_phone:\n banned = True if cls.ban_phone_number(user.add_mobile_phone) else banned\n\n try:\n # Landline phones may be set in regional format and this may cause exceptions.\n # To ban the user we at least need to ban his mobile phone numbers.\n # Landline phones are optional, and it's ok if them would not be added to the ban-list.\n if user.landline_phone:\n banned = True if cls.ban_phone_number(user.landline_phone) else banned\n\n if user.add_landline_phone:\n banned = True if cls.ban_phone_number(user.add_landline_phone) else banned\n\n except Exception:\n pass\n\n\n cls.signals.user_banned.send(cls, user=user)\n return banned",
"def is_default(self, is_default):\n # type: (bool) -> None\n\n if is_default is not None:\n if not isinstance(is_default, bool):\n raise TypeError(\"Invalid type for `is_default`, type has to be `bool`\")\n\n self._is_default = is_default",
"def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id",
"def toggle_active(self, user):\n user.active = not user.active\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True",
"def setBoolValue(self, *args):\n return _libsbml.ConversionProperties_setBoolValue(self, *args)",
"def set(self, attr, value=True):\n if type(value) == bool:\n self.__dict__['_'+attr] = value\n print attr, \"set to\", value\n else:\n print 'Value must be a bool, either \"True\" or \"False\" (no quotes)!'",
"async def greeter_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"greeter_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Greeter is now **enabled**\")\n else:\n await util.send_success(ctx, \"Greeter is now **disabled**\")",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def isNew(self):\n bot = self.storage.find_one({\"user\": self.user_id})\n if not bot:\n return True\n return False",
"def is_live(self, is_live):\n # type: (bool) -> None\n\n if is_live is not None:\n if not isinstance(is_live, bool):\n raise TypeError(\"Invalid type for `is_live`, type has to be `bool`\")\n\n self._is_live = is_live",
"def SetBold(self, bold):\r\n\r\n self._isBold = bold",
"def is_staff(self):\n return self.is_admin",
"def is_staff(self):\n return self.is_admin",
"def toggle_active(self, user):\n user.active = not user.active\n return True",
"def is_preferred(self, is_preferred):\n \n self._is_preferred = is_preferred",
"def _setstaff_login(self):\r\n\r\n self.user.is_staff = True\r\n self.user.save()\r\n\r\n self.client.login(username=self.user.username, password='foo')",
"def set_is_default(self):\n self.is_default = True",
"def set_is_default(self):\n self.is_default = True",
"def set_cuda(self, is_cuda):\n self.is_cuda = is_cuda",
"def setBoolValue(self, *args):\n return _libsbml.ConversionOption_setBoolValue(self, *args)",
"def set_superuser_permission(strategy, details, backend, *args, user=None, **kwargs):\n email = details.get('username')\n if email in settings.SOCIAL_AUTH_DJANGO_SUPERUSERS:\n user.is_superuser = True\n user.save()\n return {\n 'is_new': kwargs.get('is_new'),\n 'user': user\n }",
"def set_on_tunnel(self, status: bool):\n self._is_on_tunnel = status",
"def set_is_email_verified(profile, is_email_verified):\n profile.metadata.update({\"is_email_verified\": is_email_verified})\n profile.save()",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def set_user(self, user):\n self._user = user",
"def basetype_setup(self):\n # the text encoding to use.\n self.db.encoding = \"utf-8\"\n # A basic security setup\n lockstring = \"examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:false()\"\n self.locks.add(lockstring)\n # set the basics of being a bot\n self.cmdset.add_default(BotCmdSet)\n script_key = \"%s\" % self.key\n self.scripts.add(BotStarter, key=script_key)\n self.is_bot = True",
"def is_default(self, is_default: bool):\n\n self._is_default = is_default",
"def setBoolean(self, key, value):\n self.__config.setValue(key, QtCore.QVariant(value))\n self.__saved = False",
"def set_meta(self, meta):\n self._meta['user_meta'] = meta\n return self",
"def setAuto(self, auto):\n # type: (bool)->None\n\n self._validator.validate_one(\n 'auto', VALID_OPTS['auto'], auto)\n self._ifAttributes['auto'] = auto",
"def is_active(self, is_active):\n if self.local_vars_configuration.client_side_validation and is_active is None: # noqa: E501\n raise ValueError(\"Invalid value for `is_active`, must not be `None`\") # noqa: E501\n\n self._is_active = is_active",
"def is_turbo(self) -> bool:\n return self.turbo",
"def SetHasDropDown(self, b):\r\n\r\n self.dropdown = b"
] |
[
"0.6343722",
"0.6289822",
"0.58650166",
"0.57027304",
"0.544486",
"0.5285852",
"0.51772743",
"0.5169832",
"0.50523525",
"0.5034742",
"0.5024811",
"0.50076777",
"0.48692012",
"0.48260602",
"0.48116446",
"0.4810998",
"0.48001114",
"0.4750782",
"0.47010517",
"0.46964487",
"0.46854365",
"0.4650688",
"0.46459943",
"0.4630177",
"0.46200874",
"0.4600986",
"0.45955223",
"0.45897764",
"0.4569611",
"0.45645013",
"0.4556832",
"0.45481023",
"0.45333633",
"0.4526721",
"0.4525229",
"0.4523721",
"0.44975254",
"0.44966874",
"0.4494529",
"0.44913274",
"0.4473088",
"0.44717112",
"0.44570392",
"0.44454578",
"0.44453537",
"0.44304436",
"0.44299087",
"0.44297984",
"0.43967932",
"0.4395911",
"0.43952054",
"0.43938604",
"0.43835944",
"0.4357933",
"0.43519875",
"0.43489185",
"0.43414277",
"0.43414277",
"0.43414277",
"0.43414277",
"0.43187508",
"0.43160293",
"0.43156826",
"0.43151772",
"0.43049127",
"0.4303796",
"0.42965555",
"0.4296254",
"0.42903435",
"0.42878497",
"0.42876154",
"0.42603412",
"0.42576888",
"0.4254857",
"0.4249524",
"0.4214587",
"0.42097217",
"0.42072046",
"0.42072046",
"0.4199111",
"0.41945502",
"0.4193639",
"0.4193406",
"0.4193406",
"0.4185046",
"0.41829473",
"0.41816276",
"0.4178414",
"0.41721123",
"0.41642445",
"0.41642445",
"0.4163099",
"0.4158545",
"0.41580266",
"0.41435632",
"0.4135896",
"0.41330424",
"0.41321087",
"0.41267806",
"0.4118053"
] |
0.7644477
|
0
|
Gets the avatar_url of this UserBase.
|
def avatar_url(self):
    return self._avatar_url
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()",
"def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'",
"def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")",
"def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")",
"def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)",
"def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'",
"def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off",
"def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)",
"def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url",
"def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )",
"def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )",
"def avatar_id(self):\n return self._avatar_id",
"def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url",
"def get_picture(self):\n\t\tno_picture = settings.STATIC_URL + 'img/img_avatar.png'\n\t\ttry:\n\t\t\treturn self.picture.url\n\t\texcept:\n\t\t\treturn no_picture",
"def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar",
"def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'",
"def app_avatar(self):\n with open(self.AVATAR_PATH, 'rb') as handle:\n return handle.read()",
"def avatar_url(self, avatar_url):\n\n self._avatar_url = avatar_url",
"def image_url(self) -> str:\n return self._image_url",
"def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )",
"def get_photo_url(self):\n try:\n return self.profile_data[\"photoUrl\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve photo url: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)",
"def get_avatar_url_for_user(user_id: UserID) -> str | None:\n avatar_urls_by_user_id = get_avatar_urls_for_users({user_id})\n return avatar_urls_by_user_id.get(user_id)",
"def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)",
"def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar",
"def get_profile_picture_url(cls, filename):\n if filename is None:\n return None\n profile_picture = bucket.blob('images/users/'+filename)\n if profile_picture.exists():\n profile_picture.make_public()\n return profile_picture.public_url\n return None",
"def avatar(self, size):\n digest = md5(str(self.email).encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size)",
"def get_thumb_url(self):\n return self.thumb_url",
"def get_thumbnail_url(self):\n return self.thumbnail_url",
"def image_url(self) -> str:\n return pulumi.get(self, \"image_url\")",
"def avatar(self, size=200, d=\"robohash\"):\n\t\tdigest = md5(self.email.lower().encode('utf-8')).hexdigest()\n\t\tg = \"https://gravatar.com/avatar/{}?d={}&s={}\".format(digest, d, size)\n\t\treturn g",
"def get_thumbnail_url(self):\n raise NotImplementedError(\"Subclass must implement abstract method get_thumbnail_url\")",
"def image_url(self):\n return self.photo_url or GENERIC_IMAGE",
"def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"",
"def GetAvatarForUser(self, userName):\n\n return self.__GetJsonOrNone(\"/users/\"+userName+\"/avatar\", False)",
"def media_image_url(self):\n return self._imageUrl",
"def get_absolute_url(self):\n return '/profile/%s' % self.id",
"def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url",
"def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url",
"def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url",
"def media_image_url(self):\n return self._media_image_url",
"def media_image_url(self):\n return self._media_image_url",
"def profile_image_src(self, size):\n if self.profile_image:\n return join_path(STATIC_IMAGE_URL, 'users', \"{}.{}.{}.png\".format(self.id, self.profile_image, size)).replace(\"\\\\\", '/')\n return join_path(STATIC_IMAGE_URL, \"users\", \"no_profile.jpg\").replace(\"\\\\\", '/')",
"def get_url_image(self, obj):\n return settings.SERVER_HOST + obj.image.url",
"def get_absolute_url(self) -> str:\n return \"/users/%s/\" % self.email",
"def _get_base_url(self):\n\n # This should have been established by _logon\n assert self.__userid\n\n return \"/users/%s\" % self.__userid",
"def avatar_hash(self) -> typing.Optional[str]:",
"def url(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"url\" # pylint: disable=unsubscriptable-object\n )",
"def get_avatar_path(instance, filename):\n\n parts = str(filename).split(\".\")\n return 'avatars/' + instance.username + '/' + slugify(parts[0]) + '.' + parts[1]",
"def user_info_url(self):\n return self._user_info_url",
"async def avatar(message):\n return \"link to your avatar: {}\".format(message.user_avatar)",
"def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id() or not self.get_username():\n return ''\n \n channel_formated = 'x%sx' % (self.get_username().replace('_', '-'))\n api_url = 'http://%s.api.channel.livestream.com/2.0/thumbnail.json?id=%s' % (channel_formated, self.get_video_id())\n \n res = self._oembed_request(api_url)\n thumbnail = res.get('thumbnail', {})\n self.thumbnail_url = thumbnail.get('@url', '')\n return self.thumbnail_url",
"def get_avatar_id(self):\n # Implemented from template for osid.resource.Resource.get_avatar_id_template\n if not self._my_map['avatarId']:\n raise errors.IllegalState('this Resource has no avatar')\n else:\n return Id(self._my_map['avatarId'])",
"def avatar_version(self):\n return self._avatar_version",
"def get_image_url():",
"def avatar_path(_instance, filename):\n file_path = os.path.join('avatars', str(uuid4()))\n ext = filename.split('.')[-1]\n return '{}.{}'.format(file_path, ext)",
"def base_url(self):\n return self._base_url",
"def thumbnail(self):\n return self.get_thumbnail_url()",
"def image(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"image\" # pylint: disable=unsubscriptable-object\n )",
"def get_base_url(self):\n return getattr(self.instance, 'base_url')",
"def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if not self.thumbnail_url:\n api_url = 'https://api.dailymotion.com/video/%s?fields=thumbnail_url' % self.get_video_id()\n res = self._oembed_request(api_url)\n self.thumbnail_url = res.get('thumbnail_url', '')\n return self.thumbnail_url",
"def get_profile_picture(user):\n b = boto_init_s3(settings.BUCKET_NAME)\n if b:\n try:\n p = ProfilePicture.objects.get(is_current=True, user_id=user)\n s3_file_path = b.get_key(p.path)\n return s3_file_path.generate_url(expires_in=600)\n except:\n return \"\"\n return \"\"",
"def get_url(self):\n try:\n return self._file.url\n except AttributeError:\n raise NotImplementedError(\"Underlying file does not have a URL.\")",
"def get_url(self) -> str:\n return urljoin(self._base_url, self.url)",
"def base_url(self) -> str:\n return self._base_url",
"def gravatar_url(context, email, size=None):\n return get_gravatar_url_for_email(context['request'], email, size)",
"def getAvatarInfo(self):\n return \", \".join(self._get_avatar_info())",
"def base_url(self) -> str | None:\n return self._base_url",
"def base_url(self):\n return self._get_base_url()",
"def media_image_url(self):\n return self._current_item[\"image\"]",
"def get_url(self):\n\n return self.url",
"def get_url(self):\n\n return self.url",
"def robo_avatar_url(user_data, size=80):\n hash = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()\n url = \"https://robohash.org/{hash}.png?size={size}x{size}\".format(\n hash=hash, size=size)\n return url",
"def profile_photo(self):\n images_directory_index = 6\n filepath = None\n photo = self.profile_photo_path\n if photo is not None:\n photo_dir = photo.split(\"/\")[images_directory_index:]\n filepath = \"/\".join(photo_dir)\n return filepath",
"def media_image_url(self):\n if (media_status := self._media_status()[0]) is None:\n return None\n\n images = media_status.images\n\n return images[0].url if images and images[0].url else None",
"def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url",
"def avatar_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_avatar_url, self.title)\r\n return mark_safe(h)",
"def get_thumbnail_url(self):\n if not self.get_video_id():\n return ''\n \n if not self.thumbnail_url:\n self.thumbnail_url = 'https://img.youtube.com/vi/%s/hqdefault.jpg' % self.get_video_id()\n \n return self.thumbnail_url",
"def get_image_url(self):\n return self.get_extract_image_urls(is_first=True)",
"async def avatarurl(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n embed = discord.Embed(description=f\"{target} Profile Picture\")\n embed.set_image(url=str(target.avatar_url))\n await ctx.send(embed=embed)",
"def get_url(self):\n return self._url",
"def thumbnail_url(self):\n return None",
"def get_thumbnail_url(self):\n \n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n api_url = 'http://vimeo.com/api/v2/video/%s.json' % self.get_video_id()\n try:\n res = self._oembed_request(api_url)[0]\n except KeyError:\n return ''\n self.thumbnail_url = res.get('thumbnail_large', '')\n return self.thumbnail_url",
"def photo(self):\n person = self.get_person()\n if person is None:\n return None\n return person.photo",
"def getBaseUrl(self):\n return self.url",
"def avatar_hash(self) -> undefined.UndefinedNoneOr[str]:",
"def base_url(self):\n return self._get_field('Session', 'base_url')",
"def get_url(self):\n if not self.__initialized:\n raise NSNitroError(\"Not initialized.\")\n return self.__baseurl",
"def get_url(self):\n return self.url",
"def get_url(self):\n return self.url",
"def get_uri(self):\n return self.url",
"def repo_url(self):\n return self._repo_url",
"def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))",
"def url(self):\n return self.__values['url']",
"def get_avatar_url_for_md5_email_address_hash(md5_hash: str) -> str | None:\n avatar = db.session.execute(\n select(DbUserAvatar)\n .join(DbUser)\n .filter(db.func.md5(DbUser.email_address) == md5_hash)\n ).scalar_one_or_none()\n\n if avatar is None:\n return None\n\n return avatar.url",
"def last_camera_image_url(self) -> str:\n return self.camera_info[\"cover_path\"]",
"def geturl(self):\n return self.__url",
"def get_avatar(request, username):\n try:\n user = User.objects.get(username=username)\n file = user.get_profile().avatar.file\n except:\n file = open(os.path.join(settings.MEDIA_ROOT, \"avatars\", \"default.png\"))\n ext = file.name.split('.')[-1]\n return HttpResponse(file, mimetype=\"image/\"+ext)",
"def base_url(self) -> URL:\n return (\n URL(self.url)\n if self.url is not None\n else URL.build(\n scheme=f\"http{'s' if self.ssl else ''}\",\n host=self.hostname or self.ipaddress,\n port=str(self.port) if self.port else None,\n path=self.base_api_path or \"\",\n )\n )",
"def image_url(self):\n context = aq_inner(self.context)\n obj_url = context.absolute_url()\n if hasattr(context, 'getField'):\n field = self.context.getField('image')\n if not field:\n field = context.getField(IMAGE_FIELD_NAME)\n\n if field and field.get_size(context) > 0:\n return u'%s/%s_%s' % (obj_url, field.getName(), 'thumb')\n\n return u\"%s/isaw_logo.png\" % self.portal.absolute_url()",
"def get_thumbnail_url(self):\n if self.thumbnail_url:\n return self.thumbnail_url\n \n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return ''\n \n if not self.thumbnail_url:\n thumb_url = self.res.get('slide_image_baseurl', '')\n thumb_suffix = self.res.get('slide_image_baseurl_suffix', '')\n if thumb_url and thumb_suffix:\n #hardcode: \"1\" means the slide that we want to show as thumbnail.\n # this case is slide number 1 of presentation.\n thumb_url = ''.join(['https:', thumb_url, '1', thumb_suffix])\n self.thumbnail_url = thumb_url\n \n return self.thumbnail_url"
] |
[
"0.83125377",
"0.80434006",
"0.79306704",
"0.7921607",
"0.7852214",
"0.7280996",
"0.7254777",
"0.7066657",
"0.6957944",
"0.69364053",
"0.6856294",
"0.6781074",
"0.6680307",
"0.6594353",
"0.6536579",
"0.6501867",
"0.634738",
"0.6297715",
"0.6278013",
"0.6261349",
"0.625929",
"0.62500554",
"0.61314595",
"0.6099951",
"0.6088467",
"0.60858476",
"0.60179067",
"0.6008921",
"0.5938852",
"0.5938551",
"0.5880365",
"0.58623403",
"0.5847042",
"0.5833749",
"0.58254784",
"0.58107483",
"0.5809608",
"0.5809608",
"0.5809608",
"0.57984215",
"0.57984215",
"0.5788622",
"0.57876927",
"0.5751451",
"0.57145387",
"0.5712731",
"0.57116044",
"0.5693684",
"0.56672657",
"0.5634734",
"0.5603786",
"0.55956084",
"0.55829024",
"0.5575058",
"0.5572664",
"0.55542964",
"0.5551989",
"0.5547794",
"0.55259776",
"0.5507562",
"0.5500646",
"0.5492212",
"0.54908097",
"0.5490435",
"0.5483824",
"0.5482406",
"0.5473473",
"0.5464877",
"0.54629505",
"0.54591465",
"0.54591465",
"0.545024",
"0.5435462",
"0.5426453",
"0.5424088",
"0.5406117",
"0.5397041",
"0.53928757",
"0.53926754",
"0.5387773",
"0.53823817",
"0.5361496",
"0.53492826",
"0.5346908",
"0.53436774",
"0.534011",
"0.5336239",
"0.53228647",
"0.53228647",
"0.5320353",
"0.5313659",
"0.5313514",
"0.53079146",
"0.5307746",
"0.5307404",
"0.52797204",
"0.5277342",
"0.52692324",
"0.52681595",
"0.52652866"
] |
0.8626505
|
0
|
Sets the avatar_url of this UserBase.
|
def avatar_url(self, avatar_url):
    self._avatar_url = avatar_url
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'",
"def avatar_url(self):\n return self._avatar_url",
"def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()",
"def profile_image_url(self, profile_image_url):\n\n self._profile_image_url = profile_image_url",
"def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")",
"def avatar_id(self, avatar_id):\n\n self._avatar_id = avatar_id",
"async def avatar(self, url):\n # [p]set avatar <url>\n\n try:\n async with self.session.get(url) as r:\n data = await r.read()\n await self.bot.edit_profile(avatar=data)\n await self.bot.say(\"Done.\")\n log.debug(\"Changed avatar.\")\n except Exception as e:\n await self.bot.say(\"Error, check your console or logs for \"\n \"more information.\")\n log.exception(e)\n traceback.print_exc()",
"def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))",
"async def utils_set_avatar(self, ctx, url: str=None):\r\n if url is None:\r\n if not ctx.message.attachments:\r\n return await ctx.say(\"No avatar found! \"\r\n \"Provide an Url or Attachment!\")\r\n else:\r\n url = ctx.message.attachments[0].get(\"url\")\r\n\r\n ext = url.split(\".\")[-1]\r\n mime = mimetypes.types_map.get(ext)\r\n if mime is not None and not mime.startswith(\"image\"):\r\n # None can still be an image\r\n return await ctx.send(\"Url or Attachment is not an Image!\")\r\n\r\n async with aiohttp.ClientSession() as s, s.get(url) as r:\r\n if 200 <= r.status < 300:\r\n content = await r.read()\r\n else:\r\n return await ctx.send(\"Invalid Response code: {}\"\r\n .format(r.status_code))\r\n\r\n try:\r\n await self.amethyst.user.edit(avatar=content)\r\n except BaseException: # I don't know the exact Exception type\r\n return await ctx.send(\"Avatar was too big or not an image!\")\r\n\r\n await ctx.send(\"Successfully updated avatar!\")",
"def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)",
"def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )",
"def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")",
"def set_avatar(self, asset_id):\n # Implemented from template for osid.resource.ResourceForm.set_avatar_template\n if self.get_avatar_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_id(asset_id):\n raise errors.InvalidArgument()\n self._my_map['avatarId'] = str(asset_id)",
"def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"",
"def SetAvatar(self, fileName):\n\n self.__PostFile(\"/avatar\", fileName, \"avatar[file]\")",
"def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'",
"def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )",
"def registerAvatar(self, avatar):\r\n assert self._avatar is None\r\n self._avatar = avatar",
"def avatar_version(self, avatar_version):\n\n self._avatar_version = avatar_version",
"def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url",
"def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar",
"async def async_set_media_image_url(self, url):\n self._media_image_url = url",
"async def set_avatar(self, avatar_id, delay=0, lifespan=math.inf):\n await self.add_output(\n \"|/avatar {}\".format(avatar_id), delay=delay, lifespan=lifespan\n )",
"async def avatarurl(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n embed = discord.Embed(description=f\"{target} Profile Picture\")\n embed.set_image(url=str(target.avatar_url))\n await ctx.send(embed=embed)",
"def url(self, image_url):\n\n self._url = image_url",
"def user_url(self, user_url):\n\n self._user_url = user_url",
"def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url",
"def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off",
"def set_image(self, **kwargs):\n self.image = kwargs.get('url')",
"def image_url(self, image_url):\n\n self._image_url = image_url",
"def base_url(self, base_url):\n\n self._base_url = base_url",
"def base_url(self, base_url):\n\n self._base_url = base_url",
"def image_url(self, image_url: str):\n\n self._image_url = image_url",
"async def avatar(self, ctx, target: discord.User = None):\n if target is None:\n target = ctx.author\n await ctx.send(target.avatar_url)",
"def set_url(self, url):\n if url is not None:\n self.url = url",
"def avatar_id(self):\n return self._avatar_id",
"def test_resource_user_resource_change_user_avatar_patch(self):\n pass",
"def set_album_url(self, album_url: str) -> None:\n self.album_url = album_url",
"def avatar(self, size):\n digest = md5(str(self.email).encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size)",
"def setRemoteUrl(self, value, **kwargs):\n if value:\n value = urlparse.urlunparse(urlparse.urlparse(value))\n self.getField('remoteUrl').set(self, value, **kwargs)",
"def set_base_url(self, base_url):\n\n while base_url[-1] == '/':\n base_url = base_url[:-1]\n self.url = base_url\n self._update_children_url()",
"def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)",
"def repo_url(self, repo_url):\n\n self._repo_url = repo_url",
"def set_url(self, url):\n self.url = url",
"def set_thumbnail(self, **kwargs):\n self.thumbnail_url = kwargs.get('url')",
"def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)",
"def base_url(self, base_url):\n if base_url is not None and len(base_url) > 100:\n raise ValueError(\"Invalid value for `base_url`, length must be less than or equal to `100`\") # noqa: E501\n\n self._base_url = base_url",
"def set_user_profile_picture(user_id, file_name):\n\n user = User.query.get(user_id)\n \n user.profile_picture = file_name\n db.session.commit()",
"def _set_url(self): \n self.url = self.geturl()",
"def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'",
"async def avatar(self, ctx, *, member: disnake.Member = None):\n if member is None:\n member = ctx.author\n await ctx.send(member.display_avatar.url)",
"def profile_image_src(self, size):\n if self.profile_image:\n return join_path(STATIC_IMAGE_URL, 'users', \"{}.{}.{}.png\".format(self.id, self.profile_image, size)).replace(\"\\\\\", '/')\n return join_path(STATIC_IMAGE_URL, \"users\", \"no_profile.jpg\").replace(\"\\\\\", '/')",
"def Avatar(image_url: Optional[str] = None, size: Union[int, str] = 40, color: str = \"primary\"):\n user = auth.user.value\n if user:\n user_info = user.get(\"userinfo\", {})\n src = image_url\n if src is None:\n src = user_info.get(\"picture\")\n if src:\n with v.Avatar(size=size, class_=\"ma-2\"):\n v.Img(src=src)\n else:\n with v.Avatar(size=size, color=color):\n v.Icon(children=[\"mdi-account\"])\n else:\n with v.Avatar(size=size, color=color):\n with solara.Tooltip(\"No user\"):\n v.Icon(children=[\"mdi-error\"])",
"def save_profile_picture(current_user, url):\n # save profile_pic\n current_user.profile_pic = url\n current_user.save(current_user)",
"def set_image_from_url(self, url: str):\n response = httpx.get(url)\n if response.status_code == 200:\n file = ContentFile(response.content)\n file.name = \"url-\" + shortuuid.uuid()\n self.image = file\n self.save()",
"def set_url(self, url):\n self.data['url'] = url",
"def set_image_from_socialaccount(self, socialaccount: Union[str, SocialAccount]):\n if not self.image_is_identicon():\n return\n\n if not isinstance(socialaccount, SocialAccount):\n try:\n socialaccount = SocialAccount.objects.get(\n user=self.user, provider=socialaccount\n )\n except SocialAccount.DoesNotExist:\n return\n\n url = None\n provider = socialaccount.provider\n data = socialaccount.extra_data\n if provider == \"google\":\n url = data.get(\"picture\")\n elif provider == \"github\":\n url = data.get(\"avatar_url\")\n elif provider == \"twitter\":\n url = data.get(\"profile_image_url\")\n\n if url:\n self.set_image_from_url(url)",
"def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )",
"async def avatar(self, ctx, user: discord.User = None):\n if user is None:\n user = ctx.author\n avatar = user.avatar_url\n embed = discord.Embed(\n title=user.name + \"'s Avatar:\",\n color=discord.Colour.purple()\n )\n embed.set_image(url=avatar)\n await ctx.send(\"\", embed=embed)",
"def attachment_url(self, attachment_url):\n\n self._attachment_url = attachment_url",
"def handle_avatar(self, url, save=True):\r\n response = requests.get(url)\r\n if response.status_code == 200:\r\n fp = BytesIO(response.content)\r\n ext = url.split('.')[-1]\r\n ext = ext if ext in [\"png\", \"jpg\", \"gif\", \"jpeg\"] else \"jpg\"\r\n filename = \"{}.{}\".format(get_random_name(), ext)\r\n self.avatar.save(filename, File(fp), save=save)",
"def set_callback_url(self, callback_url):\n self.callback_url = callback_url",
"def redraw_avatar(cls):",
"def set_url(self, url):\n self.url = url",
"def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url",
"def avatar(self, size=200, d=\"robohash\"):\n\t\tdigest = md5(self.email.lower().encode('utf-8')).hexdigest()\n\t\tg = \"https://gravatar.com/avatar/{}?d={}&s={}\".format(digest, d, size)\n\t\treturn g",
"async def avatar(message):\n return \"link to your avatar: {}\".format(message.user_avatar)",
"async def avatar(message, user: ParamType.MIXER_USER):\n return \"link to @{} avatar: {}\".format(user.username, user.avatar_url)",
"async def avatar(self, ctx, user: discord.Member = None):\n\n if user is None:\n user = ctx.author\n\n avatar = user.avatar_url_as(static_format='png', size=1024)\n\n embed = discord.Embed(color=self.bot.embed_color)\n embed.set_author(name=f\"{user}'s avatar\", icon_url=avatar)\n embed.description = f'[[Download Avatar]]({avatar})'\n\n embed.set_image(url=avatar)\n\n await ctx.send(embed=embed)",
"def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar",
"def profile_pic(self, client_file_storage):\n\n # If we already have a profile picture, remove it\n if self.profile_pic_filename:\n filepath = os.path.join(\n current_app.config['UPLOADED_IMAGES_DEST'],\n self.profile_pic_filename)\n os.remove(filepath)\n self.profile_pic_filename = None\n self.profile_pic_url = None\n\n # This uploads & saves the file on the server\n # NOTE: It uses the secure_filename function...\n server_filename = images.save(client_file_storage)\n\n # Generate the URL to this file\n url = images.url(server_filename)\n\n # Store information with the user\n self.profile_pic_filename = server_filename\n self.profile_pic_url = url",
"def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url",
"async def avatar(self, ctx:utils.Context, user:discord.User=None):\n\n if user is None:\n user = ctx.author\n with utils.Embed(use_random_colour=True) as embed:\n embed.set_image(url=user.avatar_url)\n await ctx.send(embed=embed)",
"def get_picture(self):\n\t\tno_picture = settings.STATIC_URL + 'img/img_avatar.png'\n\t\ttry:\n\t\t\treturn self.picture.url\n\t\texcept:\n\t\t\treturn no_picture",
"def image_url(self) -> str:\n return self._image_url",
"def setupAvatar(self) :\n\t\tself.avatarNP = NodePath(\"ourAvatarNP\")\n\t\tself.avatarNP.reparentTo(render)\n\t\tif Util.AUTO :\n\t\t\tp = random.randint(1,Places.getNumPlaces())\n\t\t\tnbors = Places.getNeighbors(p)\n\t\t\tself.avatarNP.setPos(Places.getLoc(p)[0],\n\t\t\t\t\t Places.getLoc(p)[1],0)\n\t\t\tt = nbors[random.randint(0,len(nbors)-1)]\n\t\t\tself.avatarNP.setH(self.heading(\n\t\t\t\tself.avatarNP.getX(), self.avatarNP.getY(),\n\t\t\t\tPlaces.getLoc(t)[0], Places.getLoc(t)[1]))\n\t\t\tself.oldPlace = p\n\t\t\tself.targetPlace = t\n\t\t\tself.turning = False\n\t\t\tself.deltaH = 0\n\t\telse :\n\t\t\tself.avatarNP.setPos(63,84,0)\n\t\t\tself.avatarNP.setH(0)\n\n\t\ts = str(self.avaModel)\n\t\tself.avatar = Actor(\"models/ava\" + s,{\"walk\":\"models/walk\" + s})\n\t\tself.avatar.reparentTo(self.avatarNP)\n\n\t\tif self.avaModel == 1 : self.avatar.setScale(.002)\n\t\telif self.avaModel == 2 : self.avatar.setScale(.3)\n\t\telif self.avaModel == 3 : self.avatar.setScale(1)\n\t\telif self.avaModel == 4 : self.avatar.setScale(.9)\n\t\tself.avatar.setPos(0,0,0)\n\t\tself.avatar.setH(0)",
"def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def change_profile_img(self):\n get_photo = reddit_scrapper()\n get_photo.get_image()\n # Send image to instagram profile picture on the hidden input tag\n profile_pic_button = self.driver.find_elements_by_xpath(\n '//*[@id=\"react-root\"]/section/main/section/div[3]/div[1]/div[2]/form/input')[0].send_keys(os.getcwd() + '/daily_image/daily.jpg')\n\n time.sleep(1)\n save_profile_pic = self.driver.find_elements_by_xpath(\n '//button[contains(text(), \"Save\")]')[0].click()\n time.sleep(1)\n self.driver.get(base_url)",
"def test_user_avatar_serving(self):\n User = get_user_model()\n test_user = User.objects.create_user('Bob', 'bob@bob.com', 'pass123',\n set_default_avatar=True)\n\n avatar_url = reverse('misago:user-avatar', kwargs={\n 'pk': test_user.pk,\n 'hash': test_user.avatar_hash,\n 'size': 150,\n })\n response = self.client.get(avatar_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def url(self, url):\n\n self._url = url",
"def robo_avatar_url(user_data, size=80):\n hash = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()\n url = \"https://robohash.org/{hash}.png?size={size}x{size}\".format(\n hash=hash, size=size)\n return url",
"def setBaseURL(self,value):\n self.PDFreactorConfiguration.in1[\"baseURL\"] = value",
"def rotate_avatar(instance, **kwargs):\n if instance.avatar and not instance.is_avatar_rotated:\n rotate_user_avatar.apply_async(kwargs={'user_id': instance.pk})",
"def url(self, url: str):\n self._url = url",
"def image_url(self):\n return self.photo_url or GENERIC_IMAGE",
"async def avatar(self, ctx):\n e = discord.Embed(title=\"Here is a avatar image for you {}.\".format(ctx.author.name), color=discord.Color.magenta())\n e.set_image(url=nekos.img('avatar'))\n await ctx.send(embed=e)",
"def GetAvatarForUser(self, userName):\n\n return self.__GetJsonOrNone(\"/users/\"+userName+\"/avatar\", False)",
"def url(self, url):\n if url is not None and len(url) > 255:\n raise ValueError(\"Invalid value for `url`, length must be less than or equal to `255`\") # noqa: E501\n\n self._url = url",
"def facebook_url(self, facebook_url):\n\n self._facebook_url = facebook_url",
"async def _avatar(self, ctx: commands.Context, member: Member = None) -> None:\n\n if member is None:\n member = ctx.author\n\n embed = CleanEmbed(\n author_image=member.avatar_url,\n author_text=f\"{member.name}#{member.discriminator}'s avatar\",\n author_url=member.avatar_url,\n image_url=member.avatar_url\n )\n\n await ctx.send(embed=embed)",
"def access_token_url(self, access_token_url):\n\n self._access_token_url = access_token_url",
"async def avatar_command(self, ctx, member: Optional[Member]):\n member = member or ctx.author\n avatarUrl = member.avatar_url\n embed = Embed(\n title=f\"Avatar - {member.name}\",\n timestamp=datetime.utcnow(),\n color=Color.blurple(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_image(url=avatarUrl)\n await ctx.send(embed=embed)",
"def href(self, href):\n\n self._href = href",
"def href(self, href):\n\n self._href = href"
] |
[
"0.7171562",
"0.7052809",
"0.6945081",
"0.65909845",
"0.65333134",
"0.64174134",
"0.63824266",
"0.6325658",
"0.6313757",
"0.62597895",
"0.62147564",
"0.6207663",
"0.61743844",
"0.6130403",
"0.5995444",
"0.59891415",
"0.59879833",
"0.5912891",
"0.5886393",
"0.5850929",
"0.58470213",
"0.5778821",
"0.57513624",
"0.5595089",
"0.55389977",
"0.55209386",
"0.5502915",
"0.549757",
"0.54379195",
"0.5411947",
"0.5403753",
"0.5403753",
"0.54036546",
"0.5379312",
"0.53715855",
"0.53605014",
"0.5310645",
"0.52998704",
"0.52978736",
"0.528461",
"0.5256654",
"0.52429444",
"0.5240907",
"0.5215844",
"0.5210886",
"0.51874274",
"0.5147369",
"0.5139396",
"0.5122305",
"0.5118778",
"0.51185197",
"0.5089357",
"0.50798357",
"0.50647074",
"0.5050623",
"0.49960512",
"0.49919268",
"0.4984071",
"0.4974599",
"0.49736208",
"0.49669528",
"0.49396077",
"0.49382713",
"0.48964122",
"0.4876187",
"0.48659423",
"0.4821183",
"0.4819832",
"0.48127553",
"0.4799482",
"0.4770568",
"0.47606823",
"0.47549924",
"0.47481176",
"0.47142804",
"0.47043866",
"0.47016087",
"0.46716857",
"0.465811",
"0.4611595",
"0.4611595",
"0.4611595",
"0.4611595",
"0.4611595",
"0.4611595",
"0.4611595",
"0.46034437",
"0.45818797",
"0.45805883",
"0.45776093",
"0.45752156",
"0.4560571",
"0.45563647",
"0.4554964",
"0.45536467",
"0.45402637",
"0.45260733",
"0.4524238",
"0.45175043",
"0.45175043"
] |
0.83707076
|
0
|
Gets the avatar_version of this UserBase. Version for the user's avatar. Used for cachebusting requests for the user's avatar. Clients generally shouldn't need to use this; most avatar URLs sent by Zulip will already end with `?v={avatar_version}`.
|
def avatar_version(self):
return self._avatar_version
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def avatar_version(self, avatar_version):\n\n self._avatar_version = avatar_version",
"def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)",
"def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")",
"def avatar_url(self):\n return self._avatar_url",
"def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()",
"def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'",
"def avatar_id(self):\n return self._avatar_id",
"def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'",
"def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off",
"def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")",
"def app_avatar(self):\n with open(self.AVATAR_PATH, 'rb') as handle:\n return handle.read()",
"def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url",
"def GetAvatarForUser(self, userName):\n\n return self.__GetJsonOrNone(\"/users/\"+userName+\"/avatar\", False)",
"def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'",
"def get_version(self):\n args = {\"access_token\": self.access_token}\n try:\n response = self.session.request(\n \"GET\",\n FACEBOOK_GRAPH_URL + self.version + \"/me\",\n params=args,\n timeout=self.timeout,\n proxies=self.proxies,\n )\n except requests.HTTPError as e:\n response = json.loads(e.read())\n raise GraphAPIError(response)\n\n try:\n headers = response.headers\n version = headers[\"facebook-api-version\"].replace(\"v\", \"\")\n return str(version)\n except Exception:\n raise GraphAPIError(\"API version number not available\")",
"def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )",
"def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)",
"def latest_image_version(self) -> str:\n return self._latest_image_version",
"def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )",
"def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )",
"def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url",
"def avatar_hash(self) -> typing.Optional[str]:",
"def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar",
"def current_image_version(self) -> str:\n return self._current_image_version",
"def get_avatar_id(self):\n # Implemented from template for osid.resource.Resource.get_avatar_id_template\n if not self._my_map['avatarId']:\n raise errors.IllegalState('this Resource has no avatar')\n else:\n return Id(self._my_map['avatarId'])",
"def getAvatarInfo(self):\n return \", \".join(self._get_avatar_info())",
"def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"",
"def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))",
"def get_avatar(request, username):\n try:\n user = User.objects.get(username=username)\n file = user.get_profile().avatar.file\n except:\n file = open(os.path.join(settings.MEDIA_ROOT, \"avatars\", \"default.png\"))\n ext = file.name.split('.')[-1]\n return HttpResponse(file, mimetype=\"image/\"+ext)",
"def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar",
"def avatar(self, size):\n digest = md5(str(self.email).encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size)",
"def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)",
"def avatar_path(_instance, filename):\n file_path = os.path.join('avatars', str(uuid4()))\n ext = filename.split('.')[-1]\n return '{}.{}'.format(file_path, ext)",
"def avatar_hash(self) -> undefined.UndefinedNoneOr[str]:",
"def get_db_avatar(avatar_id: UserAvatarID) -> DbUserAvatar:\n return db.session.execute(\n select(DbUserAvatar).filter_by(id=avatar_id)\n ).scalar_one()",
"def get_avatar_path(instance, filename):\n\n parts = str(filename).split(\".\")\n return 'avatars/' + instance.username + '/' + slugify(parts[0]) + '.' + parts[1]",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''",
"def avatar(self, size=200, d=\"robohash\"):\n\t\tdigest = md5(self.email.lower().encode('utf-8')).hexdigest()\n\t\tg = \"https://gravatar.com/avatar/{}?d={}&s={}\".format(digest, d, size)\n\t\treturn g",
"async def avatar(message):\n return \"link to your avatar: {}\".format(message.user_avatar)",
"def get_avatar_metadata(self):\n # Implemented from template for osid.resource.ResourceForm.get_group_metadata_template\n metadata = dict(self._avatar_metadata)\n metadata.update({'existing_avatar_values': self._my_map['avatarId']})\n return Metadata(**metadata)",
"def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()",
"def get_version(self):\n return self.__make_api_call('get/version')",
"def rotate_avatar(instance, **kwargs):\n if instance.avatar and not instance.is_avatar_rotated:\n rotate_user_avatar.apply_async(kwargs={'user_id': instance.pk})",
"def get_version(self):\n return self.api_version",
"async def steam_avatar(message):\n s = message.content.strip()\n id = await parse_steam_id(s)\n profile = await steam_community.steam_profile(id.to_64(), id64=True)\n if profile.avatar:\n return profile.avatar\n else:\n raise CommandError(\"The user has no avatar.\")",
"def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res",
"def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''",
"def version(self):\n info = json.loads(self.get_info())\n return FapiInfo(info).version",
"def avatar_url(self, avatar_url):\n\n self._avatar_url = avatar_url",
"def node_image_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"node_image_version\")",
"def node_image_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"node_image_version\")",
"def node_image_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"node_image_version\")",
"def version(self):\n if self._version is None:\n self.version = '{user}-{date}'.format(\n user=getpass.getuser().strip().lower(),\n date=datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d'))\n return self._version",
"async def avatar(message, user: ParamType.MIXER_USER):\n return \"link to @{} avatar: {}\".format(user.username, user.avatar_url)",
"def test_resource_user_resource_get_avatar_file_get(self):\n pass",
"def get_resource_version(self) -> str:\n return self._version",
"def requestAvatarId(self, credentials):\n return credentials.username",
"def version(self):\n self._get_latest_content()\n return self._data.get('version', None)",
"def get_version(self) -> str:\n return versioning.get_version()",
"def get_picture(self):\n\t\tno_picture = settings.STATIC_URL + 'img/img_avatar.png'\n\t\ttry:\n\t\t\treturn self.picture.url\n\t\texcept:\n\t\t\treturn no_picture",
"def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)",
"def query_api_version(self):\n version_resp = self._session.get('/api/version',\n logon_required=False)\n self._api_version = version_resp\n return self._api_version",
"def UserImage(self, service_user_image=None):\n if not self.image:\n return None\n if '@' not in self.image:\n return self.image\n user_image = (\n self.annotations.get(USER_IMAGE_ANNOTATION) or service_user_image)\n if not user_image:\n return self.image\n # The image should be in the format base@sha256:hashhashhash\n match = USER_IMAGE_PATTERN.match(self.image)\n if not match:\n return self.image\n (base, h) = match.group(1, 2)\n if not user_image.startswith(base):\n # The user-image is out of date.\n return self.image\n if len(h) > 8:\n h = h[:8] + '...'\n return user_image + ' at ' + h",
"def get_version(self):\n return self.bot_data_file[\"version\"]",
"async def avatar(self, url):\n # [p]set avatar <url>\n\n try:\n async with self.session.get(url) as r:\n data = await r.read()\n await self.bot.edit_profile(avatar=data)\n await self.bot.say(\"Done.\")\n log.debug(\"Changed avatar.\")\n except Exception as e:\n await self.bot.say(\"Error, check your console or logs for \"\n \"more information.\")\n log.exception(e)\n traceback.print_exc()",
"def get_version(self):\n return self.version",
"def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None",
"def get_avatar_url_for_user(user_id: UserID) -> str | None:\n avatar_urls_by_user_id = get_avatar_urls_for_users({user_id})\n return avatar_urls_by_user_id.get(user_id)",
"def get_available_avatars(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/GetAvailableAvatars/\"))",
"def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)",
"def build_avatar(login, request):\n log.debug(\"# Building avatar\")\n from {{package}}.models import DBSESSION\n from {{package}}.models.model import User\n avatar = DBSESSION.query(Account).filter_by(account_lid=login).first()\n request.session['user'] = avatar\n if avatar:\n return []",
"def test_user_avatar_serving(self):\n User = get_user_model()\n test_user = User.objects.create_user('Bob', 'bob@bob.com', 'pass123',\n set_default_avatar=True)\n\n avatar_url = reverse('misago:user-avatar', kwargs={\n 'pk': test_user.pk,\n 'hash': test_user.avatar_hash,\n 'size': 150,\n })\n response = self.client.get(avatar_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def GetVersion(image):\n parts = image.name.rsplit('v', 1)\n if len(parts) != 2:\n log.debug('Skipping image with malformed name [%s].', image.name)\n return None\n return parts[1]",
"def api_version(self):\n\n return self._api_version",
"def requestAvatar(self, avatarId, mind, *interfaces):\n if conchinterfaces.IConchUser in interfaces:\n avatar = SSHSimpleAvatar(avatarId, self.proto)\n return interfaces[0], avatar, lambda: None\n else:\n raise Exception(\"No supported interfaces found.\")",
"def get_version(self, params):\n return self.version",
"def robo_avatar_url(user_data, size=80):\n hash = md5(str(user_data).strip().lower().encode('utf-8')).hexdigest()\n url = \"https://robohash.org/{hash}.png?size={size}x{size}\".format(\n hash=hash, size=size)\n return url",
"def get_Version(test_case, real_version=False, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, bool, Optional[HeadersType], Optional[CookiesType]) -> Str\n if not real_version:\n version = get_constant(\"MAGPIE_TEST_VERSION\")\n if version:\n return version\n version = getattr(test_case, \"version\", None)\n if version:\n return version\n app_or_url = get_app_or_url(test_case)\n resp = test_request(app_or_url, \"GET\", \"/version\",\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n json_body = check_response_basic_info(resp, 200)\n return json_body[\"version\"]",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version",
"def get_api_version(self):\n major, minor, patch = self.client.config['api_version']\n return '%s.%s.%s' % (major, minor, patch)",
"async def async_camera_image(self):\n last_image = self._nvr.get_snapshot_image(self._uuid)\n self._last_image = last_image\n return self._last_image",
"def version_bytes(self) -> str:\n return pulumi.get(self, \"version_bytes\")",
"def get(self):\n return self._version",
"def get_latest_image():\n return sqldb.get_latest_image()",
"async def avatar(self, ctx, *, member: disnake.Member = None):\n if member is None:\n member = ctx.author\n await ctx.send(member.display_avatar.url)",
"async def version(self) -> str:\n response = await self._request(\"status\")\n return response[\"version\"]",
"def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]",
"def getversion(self):\n return self.__version",
"def get_version(self, dataset_name=None):\n if dataset_name is None:\n return self._version\n else:\n # resolve dataset name\n dataset = self.__getitem__(dataset_name)\n try:\n # dataset can be either an HDF5 dataset or numpy.ndarray\n version = dataset.attrs.get(\"version\")\n except AttributeError:\n version = None\n if version is None:\n version = self._version\n if isinstance(version, bytes):\n return version.decode() # for python3\n return version",
"async def utils_set_avatar(self, ctx, url: str=None):\r\n if url is None:\r\n if not ctx.message.attachments:\r\n return await ctx.say(\"No avatar found! \"\r\n \"Provide an Url or Attachment!\")\r\n else:\r\n url = ctx.message.attachments[0].get(\"url\")\r\n\r\n ext = url.split(\".\")[-1]\r\n mime = mimetypes.types_map.get(ext)\r\n if mime is not None and not mime.startswith(\"image\"):\r\n # None can still be an image\r\n return await ctx.send(\"Url or Attachment is not an Image!\")\r\n\r\n async with aiohttp.ClientSession() as s, s.get(url) as r:\r\n if 200 <= r.status < 300:\r\n content = await r.read()\r\n else:\r\n return await ctx.send(\"Invalid Response code: {}\"\r\n .format(r.status_code))\r\n\r\n try:\r\n await self.amethyst.user.edit(avatar=content)\r\n except BaseException: # I don't know the exact Exception type\r\n return await ctx.send(\"Avatar was too big or not an image!\")\r\n\r\n await ctx.send(\"Successfully updated avatar!\")",
"def api_version(self):\n\n\t\treturn self._api_version",
"def DeleteAvatar(self):\n\n return self.__Delete(\"/avatar\")",
"def image(self) -> str:\n return getattr(\n self.auth_accounts[-1], \"image\" # pylint: disable=unsubscriptable-object\n )",
"def avatar_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_avatar_url, self.title)\r\n return mark_safe(h)",
"def version(self):\n\n return self.manifest[\"version\"]",
"def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")",
"def get_new_image(self):\n return self.vid_mem_reader.get_latest_image()[0]"
] |
[
"0.6582307",
"0.6567599",
"0.627431",
"0.62414306",
"0.6070075",
"0.60053706",
"0.5953287",
"0.5907676",
"0.59010553",
"0.5892529",
"0.58835626",
"0.58578086",
"0.58211213",
"0.55421185",
"0.5502481",
"0.54706377",
"0.5448654",
"0.5431678",
"0.540974",
"0.53088725",
"0.5273445",
"0.5252839",
"0.5223232",
"0.51925504",
"0.5183061",
"0.5154204",
"0.51488006",
"0.5133219",
"0.5133151",
"0.51166785",
"0.51049024",
"0.5066195",
"0.506548",
"0.5024727",
"0.501721",
"0.5003408",
"0.4988783",
"0.49515855",
"0.49503088",
"0.49138856",
"0.4856994",
"0.4855665",
"0.4852989",
"0.4788832",
"0.47823074",
"0.47407132",
"0.4729126",
"0.47071743",
"0.4677416",
"0.4677336",
"0.46762285",
"0.46762285",
"0.46762285",
"0.4672779",
"0.4638666",
"0.463254",
"0.46076876",
"0.46001902",
"0.45850492",
"0.457619",
"0.45724487",
"0.45650628",
"0.4559747",
"0.45457366",
"0.45429853",
"0.45340005",
"0.4527804",
"0.4527256",
"0.45237425",
"0.45222783",
"0.451667",
"0.4482151",
"0.44629192",
"0.44602683",
"0.44602683",
"0.44566026",
"0.44489288",
"0.44455057",
"0.44454688",
"0.44348648",
"0.44340587",
"0.44322085",
"0.4428363",
"0.44218868",
"0.4416438",
"0.4413006",
"0.4407583",
"0.440061",
"0.4398069",
"0.43965673",
"0.4393493",
"0.43925568",
"0.4386212",
"0.43849438",
"0.43807927",
"0.43722627",
"0.43721446",
"0.43649462",
"0.43646377",
"0.43584177"
] |
0.80662423
|
0
|
Sets the avatar_version of this UserBase. Version for the user's avatar. Used for cachebusting requests for the user's avatar. Clients generally shouldn't need to use this; most avatar URLs sent by Zulip will already end with `?v={avatar_version}`.
|
def avatar_version(self, avatar_version):
self._avatar_version = avatar_version
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def avatar_version(self):\n return self._avatar_version",
"def avatar_url(self, avatar_url):\n\n self._avatar_url = avatar_url",
"def update_avatar(self, url):\n if self.avatar:\n logging.info(f'{self} already has an avatar: {self.avatar}')\n # TODO: check if image has been updated\n else:\n logging.info(f'{self} has no profile image.')\n img_temp = NamedTemporaryFile(delete=True)\n # TODO: Use requests instead of urllib?\n img_temp.write(urlopen(url).read()) # noqa: S310\n img_temp.flush()\n self.avatar.save(f'{self.pk}', File(img_temp))",
"def avatar_id(self, avatar_id):\n\n self._avatar_id = avatar_id",
"def set_default_avatar(cls, v, *, values): # pylint: disable=no-self-argument\n seed = values[\"username\"]\n return v or f\"https://picsum.photos/seed/{seed}/200/\"",
"async def avatar(self, url):\n # [p]set avatar <url>\n\n try:\n async with self.session.get(url) as r:\n data = await r.read()\n await self.bot.edit_profile(avatar=data)\n await self.bot.say(\"Done.\")\n log.debug(\"Changed avatar.\")\n except Exception as e:\n await self.bot.say(\"Error, check your console or logs for \"\n \"more information.\")\n log.exception(e)\n traceback.print_exc()",
"def avatar_url(self):\n if self.avatar and hasattr(self.avatar, 'url'):\n return self.avatar.url\n else:\n return '/static/defaults/!default_user_avatar/user.gif'",
"async def utils_set_avatar(self, ctx, url: str=None):\r\n if url is None:\r\n if not ctx.message.attachments:\r\n return await ctx.say(\"No avatar found! \"\r\n \"Provide an Url or Attachment!\")\r\n else:\r\n url = ctx.message.attachments[0].get(\"url\")\r\n\r\n ext = url.split(\".\")[-1]\r\n mime = mimetypes.types_map.get(ext)\r\n if mime is not None and not mime.startswith(\"image\"):\r\n # None can still be an image\r\n return await ctx.send(\"Url or Attachment is not an Image!\")\r\n\r\n async with aiohttp.ClientSession() as s, s.get(url) as r:\r\n if 200 <= r.status < 300:\r\n content = await r.read()\r\n else:\r\n return await ctx.send(\"Invalid Response code: {}\"\r\n .format(r.status_code))\r\n\r\n try:\r\n await self.amethyst.user.edit(avatar=content)\r\n except BaseException: # I don't know the exact Exception type\r\n return await ctx.send(\"Avatar was too big or not an image!\")\r\n\r\n await ctx.send(\"Successfully updated avatar!\")",
"def SetAvatar(self, fileName):\n\n self.__PostFile(\"/avatar\", fileName, \"avatar[file]\")",
"def set_avatar(self, asset_id):\n # Implemented from template for osid.resource.ResourceForm.set_avatar_template\n if self.get_avatar_metadata().is_read_only():\n raise errors.NoAccess()\n if not self._is_valid_id(asset_id):\n raise errors.InvalidArgument()\n self._my_map['avatarId'] = str(asset_id)",
"def avatar(self) -> str:\n\t\tif self.hasAvatar:\n\t\t\treturn f'https://avatars.atelier801.com/{self.id % 10000}/{self.id}.jpg'\n\n\t\t# default avatar\n\t\treturn 'https://avatars.atelier801.com/0/0.jpg'",
"def avatar_url(self):\n return self._avatar_url",
"def default_avatar_url(self) -> files.URL: # noqa: D401 - Imperative mood\n return routes.CDN_DEFAULT_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n discriminator=int(self.discriminator) % 5,\n file_format=\"png\",\n )",
"def avatar_url(self) -> typing.Optional[files.URL]:\n return self.make_avatar_url()",
"def test_resource_user_resource_change_user_avatar_patch(self):\n pass",
"async def update_version(self, version: int):\n async with open(self.__file_name, mode=\"r\") as auth_file:\n tag_data = json.loads(await auth_file.read())\n await auth_file.close()\n async with open(self.__file_name, mode=\"w\") as auth:\n tag_data[\"version\"] = version\n await auth.write(json.dumps(tag_data, indent=2, sort_keys=True))\n await auth.close()\n self.__version = version",
"def make_avatar_url(self, *, ext: typing.Optional[str] = None, size: int = 4096) -> typing.Optional[files.URL]:\n if self.avatar_hash is None:\n return None\n\n if ext is None:\n if self.avatar_hash.startswith(\"a_\"):\n ext = \"gif\"\n else:\n ext = \"png\"\n\n return routes.CDN_USER_AVATAR.compile_to_file(\n urls.CDN_URL,\n user_id=self.id,\n hash=self.avatar_hash,\n size=size,\n file_format=ext,\n )",
"def avatar_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"avatar_url\")",
"async def set_avatar(self, avatar_id, delay=0, lifespan=math.inf):\n await self.add_output(\n \"|/avatar {}\".format(avatar_id), delay=delay, lifespan=lifespan\n )",
"def registerAvatar(self, avatar):\r\n assert self._avatar is None\r\n self._avatar = avatar",
"def rotate_avatar(instance, **kwargs):\n if instance.avatar and not instance.is_avatar_rotated:\n rotate_user_avatar.apply_async(kwargs={'user_id': instance.pk})",
"def GetAvatar(self):\n\n return self.__GetJsonOrNone(\"/users/\"+self.userName+\"/avatar\", False)",
"def ip_version(self, ip_version):\n\n self._ip_version = ip_version",
"def avatar_id(self):\n return self._avatar_id",
"def avatar_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"avatar_url\")",
"def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)",
"def avatar(user):\n\n if user_service.user_has_avatar(user.id):\n return url_for('user.view_avatar', user_id=user.id)\n\n # Set default values gravatar\n default = 'identicon'\n size = 100\n email = user.email or ''\n\n # Construct the url\n gravatar_url = 'https://www.gravatar.com/avatar/' + \\\n hashlib.md5(\n email.lower().encode('utf-8')).hexdigest() + '?'\n gravatar_url += urllib.parse.urlencode({'d': default, 's': str(size)})\n return gravatar_url",
"def resolve_avatar(self, info):\n if self.avatar:\n self.avatar = info.context.build_absolute_uri(self.avatar.url)\n return self.avatar",
"def get_user_avatar(user: Union[discord.User, discord.Member]) -> str:\n return user.avatar_url if user.avatar_url is not None else user.default_avatar_url",
"def avatar(self):\n admin_user = User.objects.get(pk=1)\n email = self.original_author\n\n if self.author != admin_user:\n email = self.author.email\n import hashlib\n value = hashlib.md5(email)\n\n return 'http://www.gravatar.com/avatar/%s' % value.hexdigest() + '?s=200'",
"def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_property(\"version\")",
"def avatar(self, size):\n digest = md5(str(self.email).encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size)",
"def version(self, version):\n \n self._version = version",
"def _get_avatar_url(user: Member) -> str:\n # if user.avatar.startswith(\"a\"):\n # url = user.avatar_url_as(format=\"gif\")\n # else:\n url = user.avatar_url_as(format=\"png\")\n\n return url.split(\"?\")[0] # we really don't care about the size, chop it off",
"def redraw_avatar(cls):",
"def profile_image_url(self, profile_image_url):\n\n self._profile_image_url = profile_image_url",
"def version(self, version):\n self._version = version",
"def version(self, version):\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version):\n\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"def version(self, version: str):\n\n self._version = version",
"async def avatar(message, user: ParamType.MIXER_USER):\n return \"link to @{} avatar: {}\".format(user.username, user.avatar_url)",
"def set_version(v):\n old = get_version()\n sys.stderr.write('%s -> %s\\n' % (old, v))\n with open(INIT, 'r+') as f:\n text = f.read()\n text = pattern.sub(\"__version__ = %r\" % v, text)\n f.seek(0)\n f.truncate()\n f.write(text)",
"def avatar_url(self):\n return gravatar_for_email(self.user.email, size=40)",
"async def avatar_command(self, ctx, member: Optional[Member]):\n member = member or ctx.author\n avatarUrl = member.avatar_url\n embed = Embed(\n title=f\"Avatar - {member.name}\",\n timestamp=datetime.utcnow(),\n color=Color.blurple(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_image(url=avatarUrl)\n await ctx.send(embed=embed)",
"def player_avatar(player_obj):\n avatar = \"http://www.gravatar.com/avatar/%s.jpg?d=monsterid\" % md5(player_obj.user.email).hexdigest()\n\n return avatar",
"def GetAvatarForUser(self, userName):\n\n return self.__GetJsonOrNone(\"/users/\"+userName+\"/avatar\", False)",
"def version(self, version: int):\n\n self._version = version",
"async def avatar(self, ctx, *, member: disnake.Member = None):\n if member is None:\n member = ctx.author\n await ctx.send(member.display_avatar.url)",
"def test_user_avatar_serving(self):\n User = get_user_model()\n test_user = User.objects.create_user('Bob', 'bob@bob.com', 'pass123',\n set_default_avatar=True)\n\n avatar_url = reverse('misago:user-avatar', kwargs={\n 'pk': test_user.pk,\n 'hash': test_user.avatar_hash,\n 'size': 150,\n })\n response = self.client.get(avatar_url)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response['Content-Type'], 'image/png')",
"def set_version(self, version=None, version_fun: Callable[[], str] = None):\n def version_compute():\n fun = version_fun\n if fun is None:\n fun = default_version_hash\n\n if version is None:\n return fun()\n else:\n return version\n\n self.version = version_compute\n return self",
"def set_profile_version(context, profile_id, version):\n\n check_profile_id(profile_id)\n ps = getToolByName(context, 'portal_setup')\n\n ps.setLastVersionForProfile(profile_id, unicode(version))\n assert(ps.getLastVersionForProfile(profile_id) == (version, ))\n print \"Set version for '%s' to '%s'.\" % (profile_id, version)",
"async def avatar(self, ctx, user: discord.Member = None):\n\n if user is None:\n user = ctx.author\n\n avatar = user.avatar_url_as(static_format='png', size=1024)\n\n embed = discord.Embed(color=self.bot.embed_color)\n embed.set_author(name=f\"{user}'s avatar\", icon_url=avatar)\n embed.description = f'[[Download Avatar]]({avatar})'\n\n embed.set_image(url=avatar)\n\n await ctx.send(embed=embed)",
"def put(self, account=None, user=None, account_id=None):\n file = request.files.get('file')\n filename = f\"{account_id}/avatar.img\"\n\n engine = S3Engine()\n url = engine.put_object(filename, file.read())\n Account.update(vertex_id=account_id,\n validated_data={\"avatarLink\": url})\n\n return jsonify_response({\n \"id\": account.id,\n \"title\": account.title,\n \"avatarLink\": url\n })",
"def set_user_profile_picture(user_id, file_name):\n\n user = User.query.get(user_id)\n \n user.profile_picture = file_name\n db.session.commit()",
"async def avatar(message):\n return \"link to your avatar: {}\".format(message.user_avatar)",
"def get_avatar(self, size):\n\n digest = md5(self.email.encode('utf-8')).hexdigest()\n return 'https://www.gravatar.com/avatar/{}?d=identicon&s={}'.format(\n digest, size\n )",
"def avatar_path(_instance, filename):\n file_path = os.path.join('avatars', str(uuid4()))\n ext = filename.split('.')[-1]\n return '{}.{}'.format(file_path, ext)",
"def __init__(self, version, image):\n self.version = version\n self.major_version = get_major_version(self.version)\n self.image = image",
"def app_avatar(self):\n with open(self.AVATAR_PATH, 'rb') as handle:\n return handle.read()",
"def set_version(self, version):\n\n def update_version(version, filepath):\n with open(filepath, \"r\") as stream:\n contents = stream.read()\n\n new_contents = _fix_contents_version(contents, version)\n assert contents != new_contents\n with open(filepath, \"w\") as stream:\n stream.write(new_contents)\n\n update_version(version, os.path.join(\".\", \"package.json\"))\n update_version(version, os.path.join(\".\", \"src\", \"setup.py\"))\n update_version(\n version, os.path.join(\".\", \"src\", \"robocorp_code\", \"__init__.py\")\n )",
"async def avatarurl(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n embed = discord.Embed(description=f\"{target} Profile Picture\")\n embed.set_image(url=str(target.avatar_url))\n await ctx.send(embed=embed)",
"def set_version(self, version, dataset_name=None):\n if dataset_name is None:\n self._version = version\n return self._version\n\n # resolve dataset name\n dataset = self.__getitem__(dataset_name)\n if dataset is None:\n raise KeyError(\"Dataset %s does not exist\" % dataset_name)\n dataset.attrs[\"version\"] = version\n return version",
"def firmware_version(self, firmware_version: str):\n\n self._firmware_version = firmware_version",
"async def avatar(self, ctx, user: discord.User = None):\n if user is None:\n user = ctx.author\n avatar = user.avatar_url\n embed = discord.Embed(\n title=user.name + \"'s Avatar:\",\n color=discord.Colour.purple()\n )\n embed.set_image(url=avatar)\n await ctx.send(\"\", embed=embed)",
"def handle_avatar(self, url, save=True):\r\n response = requests.get(url)\r\n if response.status_code == 200:\r\n fp = BytesIO(response.content)\r\n ext = url.split('.')[-1]\r\n ext = ext if ext in [\"png\", \"jpg\", \"gif\", \"jpeg\"] else \"jpg\"\r\n filename = \"{}.{}\".format(get_random_name(), ext)\r\n self.avatar.save(filename, File(fp), save=save)",
"def __init__(self, *args, **kwargs):\n \n super(AvatarView, self).__init__(*args, **kwargs)\n \n wm = bpy.context.window_manager\n wm.verse_avatars.add()\n wm.verse_avatars[-1].node_id = self.id\n \n # Force redraw of 3D view\n ui.update_all_views(('VIEW_3D',))\n\n self.scene_node = None\n view_initialized = False\n self.visualized = True\n self.cur_area = None\n self.cur_space = None\n\n if self.id == self.session.avatar_id:\n # Initialize default values\n self.cur_screen = bpy.context.screen\n self.__class__.__my_view = self\n\n # Try to find current 3D view \n for area in bpy.context.screen.areas.values():\n if area.type == 'VIEW_3D':\n self.cur_area = area\n for space in area.spaces.values():\n if space.type == 'VIEW_3D':\n self.cur_space = space\n break\n break\n\n if self.cur_area.type == 'VIEW_3D' and self.cur_space.type == 'VIEW_3D':\n view_initialized = True\n # Create tag group containing information about view\n self.view_tg = vrsent.VerseTagGroup(\n node=self,\n custom_type=TG_INFO_CT)\n # Create tags with data of view to 3D view\n # Location\n self.location = AvatarLocation(\n tg=self.view_tg,\n value=tuple(self.cur_space.region_3d.view_location))\n # Rotation\n self.rotation = AvatarRotation(\n tg=self.view_tg,\n value=tuple(self.cur_space.region_3d.view_rotation))\n # Distance\n self.distance = AvatarDistance(\n tg=self.view_tg,\n value=(self.cur_space.region_3d.view_distance,))\n # Perspective/Orthogonal\n self.perspective = AvatarPerspective(\n tg=self.view_tg,\n value=(self.cur_space.region_3d.view_perspective,))\n # Width\n self.width = AvatarWidth(\n tg=self.view_tg,\n value=(self.cur_area.width,))\n # Height\n self.height = AvatarHeight(\n tg=self.view_tg,\n value=(self.cur_area.height,))\n # Lens\n self.lens = AvatarLens(\n tg=self.view_tg,\n value=(self.cur_space.lens,))\n # Get current Scene ID\n if bpy.context.scene.verse_node_id != -1:\n scene_node_id = bpy.context.scene.verse_node_id\n else:\n scene_node_id = 0\n self.scene_node_id = AvatarScene(\n tg=self.view_tg,\n value=(scene_node_id,))\n \n # TODO: check following code (may be not needed anymore)\n original_type = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n bpy.ops.view3d.verse_avatar()\n bpy.context.area.type = original_type\n else:\n # TODO: Add some assert, because this should not happen.\n pass\n else:\n self.__class__.__other_views[self.id] = self\n \n if view_initialized is False:\n # Create tag group containing information about view\n self.view_tg = vrsent.VerseTagGroup(\n node=self,\n custom_type=TG_INFO_CT)\n # Create tags with data of view to 3D view\n self.location = AvatarLocation(tg=self.view_tg)\n self.rotation = AvatarRotation(tg=self.view_tg)\n self.distance = AvatarDistance(tg=self.view_tg)\n self.perspective = AvatarPerspective(tg=self.view_tg)\n self.width = AvatarWidth(tg=self.view_tg)\n self.height = AvatarHeight(tg=self.view_tg)\n self.lens = AvatarLens(tg=self.view_tg)\n self.scene_node_id = AvatarScene(tg=self.view_tg)",
"def avatar_hash(self) -> typing.Optional[str]:",
"def avatar(self, size=200, d=\"robohash\"):\n\t\tdigest = md5(self.email.lower().encode('utf-8')).hexdigest()\n\t\tg = \"https://gravatar.com/avatar/{}?d={}&s={}\".format(digest, d, size)\n\t\treturn g",
"def update(self, request, *args, **kwargs):\n username = kwargs.get(\"user\")\n response = super().update(request, *args, **kwargs)\n cache.set(f\"{USER_PROFILE_PREFIX}{username}\", response.data)\n return response",
"async def on_ready(self):\n logging.info('Bot online as %s.', self.user)\n logging.info('avatar %s', self.user.avatar)\n if not self.user.avatar or self.user.avatar != self.AVATAR_HASH:\n logging.info('Changing avatar.')\n await self.user.edit(avatar=self.app_avatar)\n await self.change_presence(activity=self.__activity)",
"def version_name(self, version_name):\n\n self._version_name = version_name",
"async def avatar(self, ctx, target: discord.User = None):\n if target is None:\n target = ctx.author\n await ctx.send(target.avatar_url)",
"def hypervisor_version(self, hypervisor_version):\n\n self._hypervisor_version = hypervisor_version",
"def get_avatar_path(instance, filename):\n\n parts = str(filename).split(\".\")\n return 'avatars/' + instance.username + '/' + slugify(parts[0]) + '.' + parts[1]",
"def Avatar(image_url: Optional[str] = None, size: Union[int, str] = 40, color: str = \"primary\"):\n user = auth.user.value\n if user:\n user_info = user.get(\"userinfo\", {})\n src = image_url\n if src is None:\n src = user_info.get(\"picture\")\n if src:\n with v.Avatar(size=size, class_=\"ma-2\"):\n v.Img(src=src)\n else:\n with v.Avatar(size=size, color=color):\n v.Icon(children=[\"mdi-account\"])\n else:\n with v.Avatar(size=size, color=color):\n with solara.Tooltip(\"No user\"):\n v.Icon(children=[\"mdi-error\"])",
"def SetGuardRailVersion(self, *args, **kwargs):\n # type: (*Any, **Any) -> None\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\n \"setGuardRailVersion\", payload=payload, response_object=None\n )",
"def set_installed_version(vcs, version):\n version_path = _get_version_path(vcs)\n with open(version_path, 'w') as f:\n f.write(version)",
"def version(self, version):\n self._version = utils.VersionParser().parse(version)",
"def __version(self, agent_version):\n if self.path == \".\":\n return \"v\" + agent_version\n\n return \"v0\" + agent_version[1:]",
"def _cbAuthenticated(self, avatar, connection):\r\n connection.registerAvatar(avatar)\r\n return avatar.callRemote('getUserView', False)"
] |
[
"0.6936227",
"0.649719",
"0.59142345",
"0.5799783",
"0.57989764",
"0.5582525",
"0.54501593",
"0.5385766",
"0.5370928",
"0.5284951",
"0.5270443",
"0.5203477",
"0.5187659",
"0.51164407",
"0.5090491",
"0.50683516",
"0.5042792",
"0.503107",
"0.50070167",
"0.498359",
"0.49506572",
"0.4885295",
"0.486961",
"0.485535",
"0.48539692",
"0.4788982",
"0.47376263",
"0.47132015",
"0.46971616",
"0.46870306",
"0.4674957",
"0.4662485",
"0.46524215",
"0.46265653",
"0.46226808",
"0.46112308",
"0.46061856",
"0.46061856",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.46059573",
"0.4604995",
"0.4604995",
"0.45979416",
"0.45789242",
"0.4576615",
"0.45760652",
"0.45535433",
"0.45483646",
"0.4526276",
"0.45080137",
"0.45063415",
"0.45061266",
"0.44961447",
"0.44875324",
"0.44550195",
"0.44506705",
"0.44480908",
"0.4440359",
"0.44222933",
"0.44191912",
"0.4416744",
"0.44157",
"0.4415107",
"0.43935752",
"0.43757173",
"0.43443286",
"0.43349168",
"0.43292892",
"0.43220025",
"0.42902896",
"0.42652684",
"0.42458633",
"0.42280573",
"0.42190206",
"0.42186677",
"0.42184436",
"0.4217448",
"0.42102966",
"0.42070866",
"0.42065582",
"0.4205221",
"0.4190478"
] |
0.8566245
|
0
|
Gets the full_name of this UserBase. Full name of the user or bot, used for all display purposes.
|
def full_name(self):
    return self._full_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def full_name(self):\n return self.user.get_full_name() or None",
"def get_full_name(self):\n return self.username",
"def get_full_name(self):\n return self.username",
"def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()",
"def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)",
"def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name",
"def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def get_user_fullname(self):\n member = self.get_user()\n if member:\n return member.getProperty('fullname')",
"def full_name(self, obj: User) -> str:\n return obj.get_full_name()",
"def get_user_fullname(self):\n return self.applicant.userprofile.display_name()",
"def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()",
"def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name",
"def get_displayname(self):\n return self.full_name or self.user.username",
"def get_full_name(self):\n\n return self.name",
"def get_full_name(self):\n\n return self.name",
"def get_full_name(self):\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\r\n full_name = '%s' % (self.name)\r\n return full_name.strip()",
"def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def get_full_name(self):\n return self.name+self.last_name",
"def get_full_name(self):\n return self.name + \" \" + self.email",
"def get_full_name(self) -> str:\n return f\"{self.first_name} {self.last_name}\"",
"def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username",
"def get_full_name(self):\n full_name = '{} {}'.format(self.first_name, self.last_name)\n return full_name.strip()",
"def full_name(self) -> str:\n return self._name",
"def get_full_name(self):\n return u'%s %s' % (self.first_name, self.last_name)",
"def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()",
"def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()",
"def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)",
"def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username",
"def get_full_name(self):\n return self.last_name + self.first_name",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def full_name_short(self):\n return \"{}. {}\".format(str(self.user.first_name)[:1], self.user.last_name)",
"def full_name(self):\n return self.first_name + \" \" + self.last_name",
"def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()",
"def get_current_user_full_name(self):\n user_service = self.runtime.service(self, 'user')\n xb_user = user_service.get_current_user()\n\n return xb_user.full_name",
"def get_full_name(self):\n return \"{} {}\".format(self.first_name, self.last_name)",
"def get_full_name(self):\n return \"%s %s\" % (self._first_name, self._last_name)",
"def get_full_name(self):\n return self.first_name + ' ' + self.last_name",
"def get_full_name(self):\n return f'{self.first_name} {self.last_name}'",
"def get_short_name(self):\n # The user is identified by their email address\n return self.first_name",
"def full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def get_full_name(self, include_title: bool = True) -> str:\n\n full_name = self.user.get_full_name()\n if include_title and self.title:\n title = self.get_title_repr()\n return f\"{full_name}, {title}\"\n return full_name",
"def get_fulll_name(self):\n return self.name",
"def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)",
"def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)",
"def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()",
"def getUserName(self):\n user = User.by_id(self.user_id)\n return user.name",
"def get_full_name(self):\n\t\treturn self.email",
"def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()",
"def get_full_name(self):\n return self.name #self is base and it hits name filed",
"def get_name(self) :\n\n return self.factory.to_user_name(self.name)",
"def get_full_name(cls, user_id):\n\n u = cls.query.get_or_404(user_id)\n u_first = u.first_name\n u_last = u.last_name \n\n u_full = f\"{u_first} {u_last}\"\n\n return u_full",
"def get_short_name(self):\n return self.username",
"def get_short_name(self):\n return self.username",
"def get_short_name(self):\n return self.username",
"def get_full_name(self):\n return self.first_name+\" \"+self.last_name",
"def full_name(self) -> Optional[str]:\n return pulumi.get(self, \"full_name\")",
"def full_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"full_name\")",
"def _get_full_name(self):\n if self.middle_name:\n return u'%s %s %s' % (self.first_name, self.middle_name,\n self.last_name)\n else:\n return u'%s %s' % (self.first_name, self.last_name)",
"def get_name(self):\n return self.user.username if self.user.username else self.user.email",
"def user_name(self):\n\n return self._user_name",
"def get_full_name(self):\n\n return self.name",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()"
] |
[
"0.8647986",
"0.8576306",
"0.8576306",
"0.8471668",
"0.84708536",
"0.8437073",
"0.8408093",
"0.8342528",
"0.8340403",
"0.8282167",
"0.8266369",
"0.8264482",
"0.81552136",
"0.8024534",
"0.8024534",
"0.80235624",
"0.8002769",
"0.8002769",
"0.8002769",
"0.8002769",
"0.8002769",
"0.7961438",
"0.7949127",
"0.79474574",
"0.7916694",
"0.79138005",
"0.78915834",
"0.7883655",
"0.7877232",
"0.78766084",
"0.78604245",
"0.78604245",
"0.7856616",
"0.78565025",
"0.78565025",
"0.7848763",
"0.7845353",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.78448755",
"0.7839317",
"0.7839317",
"0.7838409",
"0.78292406",
"0.7817586",
"0.7815622",
"0.78119636",
"0.78094137",
"0.78047764",
"0.78016466",
"0.7781628",
"0.7774492",
"0.7774492",
"0.7774492",
"0.7760961",
"0.7722864",
"0.77213717",
"0.7710119",
"0.7704579",
"0.76964337",
"0.7693399",
"0.76729447",
"0.76507425",
"0.7639614",
"0.76237637",
"0.76226556",
"0.76226556",
"0.76226556",
"0.76181924",
"0.7612502",
"0.7609767",
"0.7599886",
"0.75943834",
"0.7570775",
"0.75607973",
"0.755916"
] |
0.78962016
|
26
|
Sets the full_name of this UserBase. Full name of the user or bot, used for all display purposes.
|
def full_name(self, full_name):
    self._full_name = full_name
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_full_name(self, user_full_name):\n\n self._user_full_name = user_full_name",
"def full_name(self):\n return \"{} {}\".format(self.user.first_name, self.user.last_name)",
"def full_name(self):\n return self.user.get_full_name() or None",
"def set_fullname(self, value):\n self.fullname = value",
"def get_full_name(self):\n full_name = f'{self.first_name} {self.last_name}' if self.first_name and self.last_name else self.username\n return full_name.strip()",
"def full_name(self, obj: User) -> str:\n return obj.get_full_name()",
"def get_full_name(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip())\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def getname(self, full: bool = False) -> str:\n return self.name_full if full else self.name",
"def full_name_short(self):\n return \"{}. {}\".format(str(self.user.first_name)[:1], self.user.last_name)",
"def get_full_name(self):\n return self.username",
"def get_full_name(self):\n return self.username",
"def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name",
"def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix",
"def full_name(self):\n return self.first_name + \" \" + self.last_name",
"def set_fullname(self, value):\n raise NotImplementedError('set_fullname')",
"def full_name(self):\n \tif self.first_name and self.last_name:\n \t\treturn \"{} {}\".format(self.first_name, self.last_name)",
"def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)",
"def full_name(self) -> str:\r\n\t\tname = f'{self.last_name} {self.first_name}'\r\n\t\tif self.middle_name:\r\n\t\t\tname += ' ' + self.middle_name\r\n\t\treturn name",
"def full_name(self):\n return self._full_name",
"def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username",
"def get_full_name(self):\n # The user is identified by their email address\n return self.first_name+' '+self.last_name",
"def get_full_name(self):\r\n full_name = '%s' % (self.name)\r\n return full_name.strip()",
"def get_full_name(self) -> str:\n return f\"{self.first_name} {self.last_name}\"",
"def FullName(self, default=None):\n return self.data.get('full_name', default)",
"def FullName(self, default=None):\n return self.data.get('full_name', default)",
"def get_full_name(self):\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"",
"def get_displayname(self):\n return self.full_name or self.user.username",
"def get_full_name(self):\n return u'%s %s' % (self.first_name, self.last_name)",
"def get_full_name(self):\n return f'{self.first_name} {self.last_name}'",
"def full_name(self,first_name):\n full_name = self.first_name + ' ' + self.last_name\n return full_name",
"def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()",
"def get_full_name(self):\r\n full_name = '%s %s' % (self.first_name, self.last_name)\r\n return full_name.strip()",
"def full_name(self) -> str:\n return self._name",
"def get_full_name(self):\n return \"{0} {1}\".format(self.first_name, self.last_surname)",
"def get_full_name(self):\n return \"{} {}\".format(self.first_name, self.last_name)",
"def get_full_name(self):\n full_name = '{} {}'.format(self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n return \"%s %s\" % (self._first_name, self._last_name)",
"def get_full_name(self):\n return self.name+self.last_name",
"def get_full_name(self):\n full_name = \"%s %s\" % (self.firstname, self.lastname)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n return self.last_name + self.first_name",
"def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def get_full_name(self):\n return self.name + \" \" + self.email",
"def get_full_name(self):\n return self.first_name+\" \"+self.last_name",
"def get_full_name(self):\n return self.first_name + ' ' + self.last_name",
"def _get_full_name(self):\n if self.middle_name:\n return u'%s %s %s' % (self.first_name, self.middle_name,\n self.last_name)\n else:\n return u'%s %s' % (self.first_name, self.last_name)",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n return self.name",
"def get_full_name(self):\n\t\tfull_name = '%s %s' % (self.first_name, self.last_name)\n\t\treturn full_name.strip()",
"def get_user_display_name(self):\n return self.user.get_full_name() or self.user.get_username()",
"def get_full_name(self):\n\n return self.name",
"def get_full_name(self):\n\n return self.name",
"def full_name(self) -> Optional[str]:\n return pulumi.get(self, \"full_name\")",
"def get_full_name(self):\n full_name = '%s %s' % (self.first_name, self.last_name)\n return full_name.strip()",
"def set_first_name(self, first_name):\n self.first_name = first_name",
"def get_full_name(self, include_title: bool = True) -> str:\n\n full_name = self.user.get_full_name()\n if include_title and self.title:\n title = self.get_title_repr()\n return f\"{full_name}, {title}\"\n return full_name",
"def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()",
"def first_name(self, name):\n self._first_name = name",
"def get_full_name(self):\n return self.name #self is base and it hits name filed",
"def get_username(self):\n full_name = '%s %s' % (self.user.first_name.strip(), self.user.last_name.strip()[0:1])\n if len(full_name.strip()) == 0:\n full_name = self.user.username\n return full_name.strip()",
"def get_full_name(self):\n\n return self.name",
"def get_short_name(self):\n # The user is identified by their email address\n return self.first_name",
"def full_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"full_name\")",
"def get_full_name(self):\n\t\treturn self.email"
] |
[
"0.8363899",
"0.72705364",
"0.71784294",
"0.71450037",
"0.7083193",
"0.69895613",
"0.6981791",
"0.6929003",
"0.6929003",
"0.6929003",
"0.6892608",
"0.6891887",
"0.6880059",
"0.6880059",
"0.68403715",
"0.6780736",
"0.6769605",
"0.6752243",
"0.67450094",
"0.6741219",
"0.6727072",
"0.6701413",
"0.6660611",
"0.6658892",
"0.6656587",
"0.66436857",
"0.66050524",
"0.66050524",
"0.6598258",
"0.6597988",
"0.6597988",
"0.6549547",
"0.6538339",
"0.6537493",
"0.65296555",
"0.65240216",
"0.65240216",
"0.6511711",
"0.6511055",
"0.6495812",
"0.64729744",
"0.6469102",
"0.64686096",
"0.6452701",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6446324",
"0.6436302",
"0.64331424",
"0.64331424",
"0.64286804",
"0.64100695",
"0.64056456",
"0.6402561",
"0.6371809",
"0.6371809",
"0.6371809",
"0.6371809",
"0.6371809",
"0.6351981",
"0.63489324",
"0.63303065",
"0.63303065",
"0.6302956",
"0.628533",
"0.62695634",
"0.6239866",
"0.62237203",
"0.62119144",
"0.62101924",
"0.61742115",
"0.6159556",
"0.61507195",
"0.61373615",
"0.612319"
] |
0.81382096
|
2
|
Gets the is_admin of this UserBase. A boolean specifying whether the user is an organization administrator.
|
def is_admin(self):
    return self._is_admin
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_admin(self):\n return self.admin",
"def is_admin(self) -> bool:\n return self._is_admin",
"def is_admin(self):\r\n return self.admin",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]",
"def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')",
"def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )",
"def is_superuser(self):\n return self.is_admin",
"def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'",
"def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False",
"def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False",
"def admin(self):\n if self.is_admin:\n return True\n return False",
"def isAdmin(user):\n return isUserType(user, Admin)",
"def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def is_admin(self, user):\n return user.name in self.admins",
"def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False",
"def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role",
"def is_administrator(self):\n return self.can(Permission.ADMIN)",
"def is_administrator(self):\n return self.can(Permission.ADMIN)",
"def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff",
"def is_admin(self):\n return Role.query.get(2) in self.roles",
"def user_is_admin(user):\n return user in admins",
"def is_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n return 'Yes, you are admin'\n else:\n return \"No, you don't admin\"\n else:\n return \"You not logged in\"",
"def is_admin(user):\n return get_organisations_as_admin(user).count() > 0",
"def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_user_admin(self, user):\n return user == self.created_by",
"def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False",
"def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0",
"def is_admin(self):\n return False",
"def get_is_admin():\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except:\n return \"Could not get the UAC level.\"",
"def is_admin():\n # type: () -> bool\n current_os_name = os.name\n\n # Works with XP SP2 +\n if current_os_name == \"nt\":\n try:\n return IsUserAnAdmin()\n except Exception:\n raise EnvironmentError(\"Cannot check admin privileges\")\n elif current_os_name == \"posix\":\n # Check for root on Posix\n # os.getuid only exists on postix OSes\n # pylint: disable=E1101 (no-member)\n return os.getuid() == 0\n else:\n raise EnvironmentError(\n \"OS does not seem to be supported for admin check. OS: {}\".format(\n current_os_name\n )\n )",
"def is_user_admin(request):\n return request.user.is_superuser",
"def getAdminLevel(self):\n return self.__adminLevel",
"def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"",
"def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)",
"def is_admin(self, is_admin):\n\n self._is_admin = is_admin",
"def is_not_admin(user):\n return not user.is_superuser",
"def is_staff(self):\r\n return self.is_admin",
"def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin",
"def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")",
"def is_staff(self) -> bool:\n return self.is_admin",
"def is_staff(self):\n return self.is_admin",
"def is_staff(self):\n return self.is_admin",
"def check_is_admin(current_user):\n return current_user['isAdmin'] == True",
"def IsCorpUserOrAdmin(self):\n user_email = auth_util.GetUserEmail()\n return ((user_email and user_email.endswith('@google.com')) or\n auth_util.IsCurrentUserAdmin())",
"def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1",
"def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)",
"def is_admin(username: str) -> bool:\n db = get_db()\n return int(db.get_user_by_name(username)[\"is_admin\"]) == 1",
"def is_administrator(self):\n return False",
"def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False",
"def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n\t\treturn self.is_admin",
"def get_editable(self, user):\n return user.get('role') == 'admin'",
"def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'",
"def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'",
"def is_admin(user):\n return user.groups.filter(name='Profesores').exists()",
"def is_admin(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {\"username\":username})\n res = cur.fetchone()\n if res[5].lower() == 'admin':\n return True\n return False",
"def admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True",
"def get_admins(self):\n admins = User.objects.filter(Q(groups__name=self.admin_group_name()) | Q(is_superuser=True)).distinct()\n return admins",
"def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'",
"def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)",
"def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()",
"def is_local_administrator(self):\n\t\treturn bool(call_sdk_function('PrlUsrCfg_IsLocalAdministrator', self.handle))",
"def user_is_nodeadmin(userobj):\n from .node import Node\n return user_is_basenodeadmin(userobj, Node)",
"def is_billing_admin(self):\n return self._is_billing_admin",
"def invalid_admin_state(isadmin):\n if isinstance(isadmin, bool):\n return False\n return True",
"def administrator_configuration(self) -> Optional[pulumi.Input['AdministratorConfigurationArgs']]:\n return pulumi.get(self, \"administrator_configuration\")",
"def administrator_configuration(self) -> Optional[pulumi.Input['AdministratorConfigurationArgs']]:\n return pulumi.get(self, \"administrator_configuration\")",
"def isAdmin(self, nick):\n\t\tif nick in self.config[\"admins\"]:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def authAdmin(self, email='admin@mail.com'):\n admin = self._createUser(email=email, role=UserType.ADMIN)\n return admin, self._authenticate(admin)",
"def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator",
"def is_admin(self, username): #WORKS\n done = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done == 0: # If query is unsuccessful, username is not an administrator.\n return False\n else:\n return True",
"def administrator_configuration(self) -> Optional['outputs.AdministratorConfigurationResponse']:\n return pulumi.get(self, \"administrator_configuration\")",
"def administrators(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"administrators\")",
"def verify_user(self):\n verified = False\n if self.user.role.role_name == \"Admin\":\n verified = True\n\n return verified",
"def get_admins(self):\n return self.admins_group.user_set.all()",
"def user_is_periodadmin(userobj):\n from .period import Period\n return user_is_basenodeadmin(userobj, Period)",
"def check_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n\n return md5((str(id) + \"admin\").encode()).hexdigest() in self.__admins",
"def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)",
"def admin_user(self) -> pulumi.Input['LabVirtualMachineAdminUserArgs']:\n return pulumi.get(self, \"admin_user\")",
"def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)",
"def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True",
"def _get_admin_status(self):\n return self.__admin_status",
"def is_admin(author):\n if str(author).lower() in config[\"admins\"]:\n return True\n return False",
"def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})",
"def check_admin_session(self):\n for session in vms.get_vm_sessions(vm_name=self.vm_name):\n if (\n session.get_console_user()\n and\n session.get_user().get_user_name().startswith(\"admin\")\n ):\n return True\n return False",
"def get_editable(self, user):\n if self.doc.get('locked'): return False\n return user.get('role') in ('admin', 'manager', 'engineer')",
"def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)",
"def is_admin(func):\n\n @wraps(func)\n def decorated_function(*args, **kwargs):\n from .base_validator import ValidationError\n user = request.user\n if user.is_admin == IsAdmin.yes:\n return func(*args, **kwargs)\n raise ValidationError(\n {'message': 'You are not authorized to access this page'},\n 403\n )\n\n return decorated_function",
"def is_admin(member: Union[discord.Member, discord.User]) -> bool:\n if not isinstance(member, discord.Member):\n return False\n return any([role.id in config.BOT_ADMIN_ROLES for role in member.roles])",
"def get_admin_users(self):\r\n try:\r\n users = self.list_all(\"users\")\r\n users_admin = [user for user in users if user[\"role\"] == \"admin\"]\r\n return users_admin\r\n except PDClientError as e:\r\n raise e",
"def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))"
] |
[
"0.7881886",
"0.77665955",
"0.77486885",
"0.7689977",
"0.7461093",
"0.73404866",
"0.7156036",
"0.71471244",
"0.71004605",
"0.7084834",
"0.7068",
"0.7034636",
"0.70345366",
"0.6991166",
"0.69782645",
"0.6957708",
"0.6923771",
"0.6915408",
"0.6905435",
"0.6894784",
"0.6880022",
"0.6856704",
"0.6856704",
"0.6811209",
"0.67997295",
"0.6794856",
"0.6762388",
"0.67253304",
"0.672264",
"0.668783",
"0.6672917",
"0.66294605",
"0.662668",
"0.6617968",
"0.66058075",
"0.65774584",
"0.6567952",
"0.6558772",
"0.654587",
"0.6540436",
"0.65100056",
"0.6466288",
"0.64316654",
"0.6430916",
"0.64253",
"0.6416158",
"0.6416158",
"0.6389208",
"0.6385092",
"0.63681513",
"0.63638496",
"0.6329462",
"0.6304656",
"0.6277555",
"0.62291026",
"0.61590403",
"0.61590403",
"0.61590403",
"0.6156403",
"0.61358374",
"0.6134993",
"0.6134993",
"0.6097937",
"0.6064527",
"0.6023453",
"0.60217667",
"0.60196763",
"0.6017919",
"0.59937394",
"0.5979973",
"0.5926372",
"0.5922049",
"0.5916135",
"0.5915625",
"0.5915625",
"0.58392316",
"0.58262056",
"0.5790089",
"0.5778718",
"0.57667536",
"0.57594144",
"0.57349354",
"0.57338214",
"0.57179713",
"0.5689478",
"0.5688229",
"0.56774837",
"0.56703335",
"0.56670004",
"0.56533927",
"0.56511706",
"0.5643922",
"0.56419694",
"0.56358856",
"0.5626028",
"0.561838",
"0.5617717",
"0.5609841",
"0.5594697"
] |
0.7862511
|
1
|
Sets the is_admin of this UserBase. A boolean specifying whether the user is an organization administrator.
|
def is_admin(self, is_admin):
    self._is_admin = is_admin
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def is_admin(self) -> bool:\n return self._is_admin",
"def is_admin(self):\n return self.admin",
"def is_admin(self):\n return self._is_admin",
"def is_admin(self):\n return self._is_admin",
"def is_billing_admin(self, is_billing_admin):\n\n self._is_billing_admin = is_billing_admin",
"def is_admin(self):\r\n return self.admin",
"def super_admin(self, super_admin):\n\n self._super_admin = super_admin",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def set_admin(self, admins):\n self.set_group(self._gp_admin_name, admins)",
"def invalid_admin_state(isadmin):\n if isinstance(isadmin, bool):\n return False\n return True",
"def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )",
"def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False",
"def admin(self):\n if self.is_admin:\n return True\n return False",
"def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')",
"def is_admin(self, user):\n return user.name in self.admins",
"def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False",
"def is_superuser(self):\n return self.is_admin",
"def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False",
"def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]",
"def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False",
"def is_admin(self):\n return False",
"def isAdmin(user):\n return isUserType(user, Admin)",
"def is_user_admin(self, user):\n return user == self.created_by",
"def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def is_staff(self) -> bool:\n return self.is_admin",
"def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role",
"def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)",
"def is_staff(self):\r\n return self.is_admin",
"def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'",
"def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)",
"def is_staff(self):\n return self.is_admin",
"def is_staff(self):\n return self.is_admin",
"def user_is_admin(user):\n return user in admins",
"def _set_admin_status(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"admin-status\", rest_name=\"admin-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"admin_status must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"admin-status\", rest_name=\"admin-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__admin_status = t\n if hasattr(self, '_set'):\n self._set()",
"def set_is_staff(self, role):\n self.is_staff = (role != User.ROLE_USER)",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def is_admin(user):\n return get_organisations_as_admin(user).count() > 0",
"def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff",
"def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")",
"def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()",
"def is_administrator(self):\n return False",
"def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"",
"def promote(self):\n if self.is_admin == True:\n pass\n self.is_admin = True\n User.save(self)",
"def is_not_admin(user):\n return not user.is_superuser",
"def admin_email(self, admin_email):\n\n self._admin_email = admin_email",
"def is_admin(username: str) -> bool:\n db = get_db()\n return int(db.get_user_by_name(username)[\"is_admin\"]) == 1",
"def is_staff(self):\n\t\treturn self.is_admin",
"def is_user_admin(request):\n return request.user.is_superuser",
"def check_is_admin(current_user):\n return current_user['isAdmin'] == True",
"def is_administrator(self):\n return self.can(Permission.ADMIN)",
"def is_administrator(self):\n return self.can(Permission.ADMIN)",
"def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin",
"def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)",
"def promote_user(self, username):\n parser_promote.add_argument('isadmin', choices=[\"True\", \"False\"],\n required=True, nullable=False,\n help=\"(Accepted values: True, False)\"\n )\n args = parser_promote.parse_args()\n isAdmin = request.json.get('isadmin')\n\n query = \"\"\"UPDATE users SET isadmin=%s WHERE username=%s\"\"\"\n values = isAdmin, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True",
"def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin",
"def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1",
"def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0",
"def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)",
"def is_admin():\n # type: () -> bool\n current_os_name = os.name\n\n # Works with XP SP2 +\n if current_os_name == \"nt\":\n try:\n return IsUserAnAdmin()\n except Exception:\n raise EnvironmentError(\"Cannot check admin privileges\")\n elif current_os_name == \"posix\":\n # Check for root on Posix\n # os.getuid only exists on postix OSes\n # pylint: disable=E1101 (no-member)\n return os.getuid() == 0\n else:\n raise EnvironmentError(\n \"OS does not seem to be supported for admin check. OS: {}\".format(\n current_os_name\n )\n )",
"def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'",
"def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)",
"def set_admins(self, admin_emails):\n self.login('superadmin@example.com', is_super_admin=True)\n response = self.testapp.get('/admin')\n csrf_token = self.get_csrf_token_from_response(response)\n self.post_json('/adminhandler', {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.ADMIN_EMAILS.name: admin_emails,\n }\n }, csrf_token)\n self.logout()",
"def is_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n return 'Yes, you are admin'\n else:\n return \"No, you don't admin\"\n else:\n return \"You not logged in\"",
"def is_admin(self):\n return Role.query.get(2) in self.roles",
"def set_is_ai(self, is_ai):\n self.__is_ai = is_ai",
"def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def authAdmin(self, email='admin@mail.com'):\n admin = self._createUser(email=email, role=UserType.ADMIN)\n return admin, self._authenticate(admin)",
"def set_visible(self, is_visible):\n self._data['is_visible'] = 1 if is_visible else 0",
"def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield",
"def set_is_portal_enabled(self, is_portal_enabled):\n self.is_portal_enabled = is_portal_enabled",
"def user_is_nodeadmin(userobj):\n from .node import Node\n return user_is_basenodeadmin(userobj, Node)",
"def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'",
"def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'",
"def login_as_admin():\n users.loginAsUser(\n config.VDC_ADMIN_USER, config.VDC_ADMIN_DOMAIN,\n config.VDC_PASSWORD, filter=False\n )\n return True",
"def add_admin():\n admin_role = Role.query.filter_by(permissions=0xFF).first()\n admin = User.query.filter_by(email=current_app.config['PILI_ADMIN']).first()\n if not admin:\n admin_user = User(\n email=current_app.config['PILI_ADMIN'],\n username=current_app.config['PILI_ADMIN_NAME'],\n password=generate_password(10),\n role=admin_role,\n confirmed=True,\n )\n db.session.add(admin_user)\n db.session.commit()",
"async def assert_user_is_admin(auth: Auth, requester: Requester) -> None:\n is_admin = await auth.is_server_admin(requester)\n if not is_admin:\n raise AuthError(HTTPStatus.FORBIDDEN, \"You are not a server admin\")",
"def set_cuda(self, is_cuda):\n self.is_cuda = is_cuda",
"def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False",
"def IsPrivilegedUser(user_email, is_admin):\n return is_admin or (user_email and user_email.endswith('@google.com'))",
"def is_billing_admin(self):\n return self._is_billing_admin",
"def create_user_as_admin(self, *args, **kwargs):\n profile = self.create_user(*args, **kwargs)\n profile.make_administrator()\n return profile",
"def get_editable(self, user):\n return user.get('role') == 'admin'",
"def add_admin(user):\n _add_owner(\n _lookup_user(user).biv_id,\n _add_model(pam.Admin())\n )",
"def create_admin_user(self):\n\n sys.stdout.write('creating admin user...'.ljust(LJ_SIZE))\n\n User.objects.create_superuser(username=ADMIN_ACCOUNT_NAME, password=ADMIN_ACCOUNT_PASSWORD, email='')\n self.print_ok()\n\n return self.admin_user_exists()",
"def set_is_default_org(self, is_default_org):\n self.is_default_org = is_default_org",
"def IsCorpUserOrAdmin(self):\n user_email = auth_util.GetUserEmail()\n return ((user_email and user_email.endswith('@google.com')) or\n auth_util.IsCurrentUserAdmin())",
"def admin_host(self, admin_host):\n\n self._admin_host = admin_host",
"def is_admin(func):\n\n @wraps(func)\n def decorated_function(*args, **kwargs):\n from .base_validator import ValidationError\n user = request.user\n if user.is_admin == IsAdmin.yes:\n return func(*args, **kwargs)\n raise ValidationError(\n {'message': 'You are not authorized to access this page'},\n 403\n )\n\n return decorated_function",
"def is_admin(author):\n if str(author).lower() in config[\"admins\"]:\n return True\n return False",
"def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))",
"def is_guest(self, is_guest):\n\n self._is_guest = is_guest",
"def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()"
] |
[
"0.83135295",
"0.66889644",
"0.6337106",
"0.626878",
"0.62481576",
"0.62481576",
"0.62017304",
"0.6183351",
"0.6072974",
"0.60349",
"0.601608",
"0.59858966",
"0.59276825",
"0.58737713",
"0.58411723",
"0.5830839",
"0.5752557",
"0.5735811",
"0.5708521",
"0.57063186",
"0.57017344",
"0.5699304",
"0.5573182",
"0.556661",
"0.5545417",
"0.55386096",
"0.5518142",
"0.5484885",
"0.544499",
"0.54327714",
"0.5425201",
"0.5417353",
"0.5411021",
"0.5357822",
"0.5357822",
"0.53223747",
"0.53129995",
"0.5297404",
"0.52830946",
"0.52372706",
"0.52147216",
"0.51989967",
"0.51938504",
"0.5192063",
"0.51917946",
"0.51874334",
"0.5172846",
"0.51675886",
"0.5149245",
"0.51470554",
"0.51461387",
"0.5140382",
"0.5131102",
"0.5124785",
"0.5124785",
"0.5123895",
"0.51081836",
"0.51081836",
"0.51081836",
"0.509765",
"0.5084274",
"0.5075404",
"0.50729585",
"0.5053613",
"0.50269824",
"0.5024025",
"0.5020295",
"0.500671",
"0.5000754",
"0.49979752",
"0.49939662",
"0.49877",
"0.49543443",
"0.4952015",
"0.49419388",
"0.49311048",
"0.49089816",
"0.48677438",
"0.48616657",
"0.48487315",
"0.48487315",
"0.48396468",
"0.4824461",
"0.48206255",
"0.480447",
"0.477122",
"0.4768402",
"0.47673348",
"0.47598317",
"0.47536376",
"0.47484618",
"0.47350734",
"0.47309124",
"0.47304952",
"0.47196782",
"0.47121826",
"0.47035587",
"0.47022885",
"0.4692613",
"0.4691667"
] |
0.8068177
|
1
|
Gets the is_owner of this UserBase.
|
def is_owner(self):
    return self._is_owner
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_user_is_owner(self):\n return self._tag == 'user_is_owner'",
"def get_owner_object(self):\n return False",
"def isowner(self, o):\n return self._owner is o",
"def get_owner(self):\n return self.__owner",
"def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None",
"def get_owner(self):\n return self.settings.get(\"owner\", None)",
"def get_owner(self, obj):\n return obj.user.username",
"def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])",
"def getOwner(self):\n return self.__owner",
"def getOwner(self):\n return self.__owner",
"def is_current_session_owner(self):\n\t\treturn bool(call_sdk_function('PrlAcl_IsCurrentSessionOwner', self.handle))",
"def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )",
"def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")",
"def owner_type(self) -> str:\n return pulumi.get(self, \"owner_type\")",
"def getOwner(self):\r\n return self.owner",
"def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()",
"def owner_id(self):\n return self._owner_id",
"def GetOwnerManager(self):\r\n\r\n return self._owner_mgr",
"def bot_owner_id(self):\n return self._bot_owner_id",
"def is_owner(self, is_owner):\n\n self._is_owner = is_owner",
"def user(self):\n return self.owner.user",
"def owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner\")",
"def is_active_user(self):\n\n return self.is_active",
"def get_owner_object(self):\n return None",
"def get_owner_object(self):\n return None",
"def get_owner_object(self):\n return None",
"def get_owner(self):\n owner = gdef.PSID()\n lpbOwnerDefaulted = gdef.BOOL()\n winproxy.GetSecurityDescriptorOwner(self, owner, lpbOwnerDefaulted)\n # Return None of owner is NULL\n return owner or None",
"def business_owner(self):\n return self._business_owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")",
"def owner(self):\n return Organization.objects.get(id=self.owner_id)",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def owner(self):\n return self.__owner",
"def is_owner(self, author):\n return not self.server or author == self.server.owner",
"def owner(self):\n answer = self._call('owner')\n return answer.owner",
"def owner_id(self) -> str:\n return self.__owner_id",
"def has_ownership(self):\n user = self.request.user\n object = self.get_object()\n if object.owned_by(user):\n return True\n else:\n return False",
"def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId",
"def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def is_application_owner(user, application):\n\n if application.auth_user == user:\n \"\"\"Check auth_user first\"\"\"\n return True\n\n elif not application.auth_user and application.user_id:\n \"\"\"Check Auth0 for legacy applications\"\"\"\n if application.user_id == get_auth0_user_id_by_email(user.email):\n return True\n else:\n return False\n\n else:\n \"\"\"Otherwise false\"\"\"\n return False",
"def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True",
"def owner(self) -> str:\n return self._owner",
"def owner_id(self) -> int:\n return self.proto.owner",
"def get_owner_object(self):\n return self",
"def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")",
"def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")",
"def is_owned_by(self, user):\n return user and user.id == self.user_id",
"def is_still_owner(self):\n raise tooz.NotImplemented",
"def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")",
"def calc_owner(self, activation):\n from django.contrib.auth import get_user_model\n\n owner = self._owner\n if callable(owner):\n owner = owner(activation)\n elif isinstance(owner, dict):\n owner = get_user_model()._default_manager.get(**owner)\n return owner",
"def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user",
"def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n return UserAS.objects.filter(as_ptr=obj).exists()",
"def available(self, o):\n return not self.locked() or self.isowner(o)",
"def active(self):\n return self.owner.active",
"def owner_info(self) -> pulumi.Output['outputs.UserInfoResponse']:\n return pulumi.get(self, \"owner_info\")",
"def is_active(self):\n return self.status == ACTIVE_USER",
"def owner(self):\n if self.get_team():\n return self.get_team()\n return None",
"def locked(self):\n return self._owner is not None",
"def owner(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"owner\")",
"def owner_of_project(self, user_id, project_id):\n\n res = self.conn.cursor().execute(\"SELECT * FROM projects WHERE owner=? AND id=?\", (user_id, project_id,))\n result = res.fetchone()\n if not result:\n return False\n return True",
"def technical_owner(self):\n return self._technical_owner",
"def user_can_edit(self, user):\n return user == self.owner",
"def get_owner_name(self):\n\t\treturn call_sdk_function('PrlAcl_GetOwnerName', self.handle)",
"def user(self):\n return self._forced_user",
"def is_usermanager(self):\n return False",
"async def cog_check(self, ctx:utils.Context):\n\n if ctx.author.id in self.bot.config['owners']:\n return True\n raise commands.NotOwner",
"def ownerOfObject(obj):\n udb, uid = obj.getOwnerTuple()\n root = obj.getPhysicalRoot()\n udb = root.unrestrictedTraverse(udb, None)\n if udb is None:\n user = SpecialUsers.nobody\n else:\n user = udb.getUserById(uid, None)\n if user is None:\n user = SpecialUsers.nobody\n else:\n user = user.__of__(udb)\n return user",
"def logged_in(self):\n return self.user is not None",
"def is_superuser(self):\n return self.is_admin",
"def is_active(self):\n return self.user.is_active",
"def username(self) -> str:\n return self._data['Owner']",
"def is_organization(self):\n return self.user_id is None",
"def get_owner(self, property_name):\n\n property_owner = self.db.read_value(property_name, \"owner\")\n return property_owner",
"def source_owner(self) -> str:\n return pulumi.get(self, \"source_owner\")",
"def source_owner(self) -> str:\n return pulumi.get(self, \"source_owner\")",
"def clean_owner(self):\n username = self.cleaned_data['owner']\n owner = User.objects.filter(username=username).first()\n if owner is None:\n raise forms.ValidationError(\n _('User %(username)s does not exist'),\n params={'username': username},\n )\n if self.organization.owners.filter(username=username).exists():\n raise forms.ValidationError(\n _('User %(username)s is already an owner'),\n params={'username': username},\n )\n return owner",
"def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")",
"def root_user_status(self, instance):\n return instance.root_user_status()",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def is_user_admin(self, user):\n return user == self.created_by",
"def has_object_permission(self, request, view, obj):\n owner_field = getattr(view, \"owner_field\", None)\n\n if owner_field is None:\n # if no owner_field is specified, the object itself is compared\n owner = obj\n else:\n # otherwise we lookup the owner by the specified field\n owner = getattr(obj, owner_field)\n\n return owner == request.user",
"def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None",
"def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_council_privileges():\n return True\n return False",
"def calc_owner_permission(self, activation):\n owner_permission = self._owner_permission\n if callable(owner_permission):\n owner_permission = owner_permission(activation)\n return owner_permission",
"def is_personal(self):\n return self.user_id is not None",
"def is_usermanager(self):\n return self.can(Permission.CRUD_USERS)",
"def can_be_edited(self, user):\n return (self.is_public or user == self.owner or\n user in list(self.auth_users.all()))",
"def get_current_property_owner(self, player_name, movement_manager):\n current_location_num = movement_manager.get_current_location_value(player_name)\n current_prop_owner = self.db.specific_read_value(current_location_num, \"board_position\", \"owner\")\n return current_prop_owner",
"def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active",
"def get_owner(self):\n return self._creatorsHeap[0][1]",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def check_user_has_owner_clearance(self, userName, userGroup):\n dataBase = self.read_database()\n owners = dataBase['userGroups'][userGroup]['owners']\n return userName in owners",
"def get_is_self(self, obj: Profile) -> bool:\n request: HttpRequest = self.context.get('request')\n if request:\n if request.user.is_authenticated:\n return obj == request.user.profile\n return False"
] |
[
"0.7645217",
"0.7104894",
"0.6914149",
"0.6853681",
"0.67683494",
"0.6743861",
"0.66868156",
"0.65692985",
"0.6566306",
"0.6566306",
"0.6566118",
"0.6552337",
"0.65283376",
"0.6492521",
"0.64828247",
"0.6478465",
"0.64477557",
"0.6443881",
"0.64351815",
"0.6400954",
"0.63200957",
"0.6316049",
"0.6304822",
"0.62856317",
"0.62856317",
"0.62856317",
"0.62702477",
"0.6268768",
"0.6229864",
"0.6229864",
"0.6229864",
"0.6229864",
"0.6229864",
"0.619663",
"0.61916804",
"0.6189301",
"0.61670065",
"0.6151579",
"0.61350536",
"0.61188537",
"0.61114866",
"0.6104384",
"0.6101447",
"0.6090168",
"0.6077844",
"0.607407",
"0.6064968",
"0.6051766",
"0.6023857",
"0.5968121",
"0.5968121",
"0.59678966",
"0.5943112",
"0.59151775",
"0.59000033",
"0.5867639",
"0.58636755",
"0.58630335",
"0.58323437",
"0.58117354",
"0.5778014",
"0.5769936",
"0.5741181",
"0.5720812",
"0.57020634",
"0.56848574",
"0.56754065",
"0.56665003",
"0.5651466",
"0.56468654",
"0.56422234",
"0.563383",
"0.5632323",
"0.56198066",
"0.5612379",
"0.5607492",
"0.5599544",
"0.5597987",
"0.5576267",
"0.5576267",
"0.55721414",
"0.5541145",
"0.5521161",
"0.54844785",
"0.54844785",
"0.5478166",
"0.5457624",
"0.54376215",
"0.54371023",
"0.5432628",
"0.54304534",
"0.5426097",
"0.5425526",
"0.54128313",
"0.5405401",
"0.53623897",
"0.5360249",
"0.5359571",
"0.5355872",
"0.5348534"
] |
0.79326665
|
0
|
Sets the is_owner of this UserBase.
|
def is_owner(self, is_owner):
    self._is_owner = is_owner
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_owner(self, owner):\n self.__owner = owner",
"def set_owner(self, owner):\n self.settings[\"owner\"] = owner",
"def is_user_is_owner(self):\n return self._tag == 'user_is_owner'",
"def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner",
"def owner_id(self, owner_id):\n\n self._owner_id = owner_id",
"def owner_id(self, owner_id):\n self._owner_id = owner_id",
"def owner_type(self, owner_type):\n\n self._owner_type = owner_type",
"def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)",
"def owner(self, owner: str):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def is_owner(self):\n return self._is_owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def scope_owner(self, scope_owner):\n\n self._scope_owner = scope_owner",
"def owner_reference(self, owner_reference):\n\n self._owner_reference = owner_reference",
"def set_owner(self, owner: Optional[\"STACObject_Type\"]) -> \"Link\":\n self.owner = owner\n return self",
"def owner_id(self, owner_id):\n if owner_id is None:\n raise ValueError(\"Invalid value for `owner_id`, must not be `None`\") # noqa: E501\n\n self._owner_id = owner_id",
"def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id",
"def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)",
"def isowner(self, o):\n return self._owner is o",
"def owner(self, owner):\n if self.local_vars_configuration.client_side_validation and owner is None: # noqa: E501\n raise ValueError(\"Invalid value for `owner`, must not be `None`\") # noqa: E501\n\n self._owner = owner",
"def transfer_ownership(self, user):\n new_owner = get_user_model().objects.filter(is_active=True) \\\n .get(pk=user.pk)\n self.owner = new_owner",
"def business_owner(self, business_owner):\n\n self._business_owner = business_owner",
"def pre_save(self, obj):\n obj.owner = self.request.user",
"def set_entity_owner_account_type(self, username, account_type):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_SET_ENTITY_OWNER_ACCOUNT_TYPE + ':' + username + '|' + account_type)",
"def save_model(self, request, obj, form, change):\n try:\n owner = form.instance.owner\n except models.Application.owner.RelatedObjectDoesNotExist:\n form.instance.owner = request.user\n\n super().save_model(request, obj, form, change)",
"def technical_owner(self, technical_owner):\n\n self._technical_owner = technical_owner",
"def setOwnerPassword(self,value):\n self.PDFreactorConfiguration.in1[\"ownerPassword\"] = value",
"def get_owner_object(self):\n return False",
"def save(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n if not user:\n raise ValueError(\"User not present in the model\")\n if not hasattr(self, 'owner'):\n self.owner = user\n elif not self.can_be_edited(user):\n raise ValueError(\"User can't edit the model\")\n self.last_updated_by = user\n self.last_updated_datetime = datetime.datetime.now()\n super(BaseModel, self).save(*args, **kwargs)",
"def set_user(self, user):\r\n self.user = user",
"def owner_type(self) -> str:\n return pulumi.get(self, \"owner_type\")",
"def add_owner_id(data=None, **kw):\n data['owner_id'] = current_user.id",
"def is_still_owner(self):\n raise tooz.NotImplemented",
"def ownercheck(self, userhost):\n if self.cfg and self.cfg.owner:\n if userhost in self.cfg.owner: return True\n return False",
"def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None",
"def is_owner_or_privileged_user(obj_user, request):\n return (\n obj_user == request.user or request.user.is_superuser or is_admin_user(request)\n )",
"def set_object_owner(self, bucket_name, object_name, uid, gid):\n\n return h3lib.set_object_owner(self._handle, bucket_name, object_name, uid, gid, self._user_id)",
"def is_owner(self, author):\n return not self.server or author == self.server.owner",
"def bot_owner_id(self):\n return self._bot_owner_id",
"def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email",
"def get_owner(self, obj):\n return obj.user.username",
"def owner_id(self):\n return self._owner_id",
"def set_is_ai(self, is_ai):\n self.__is_ai = is_ai",
"def set_user(self, user):\n self._user = user",
"def test_is_owner_inherited_and_local(self):\n self.make_assignment(self.project, self.user_alice, self.role_owner)\n self.assertTrue(self.project.is_owner(self.user_alice))",
"def owner_id(self) -> str:\n return self.__owner_id",
"def is_owner(self, resource: Model) -> bool:\n\n try:\n self.raise_for_ownership(resource)\n except SupersetSecurityException:\n return False\n\n return True",
"def set_user(self, user: User):\n self.__user = user",
"def put(self, user_id):\n self.conn = pecan.request.db_conn\n self.conn.change_billing_owner(request.context,\n project_id=self.project_id,\n user_id=user_id)",
"def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")",
"def enable_root_user(self, instance):\n return instance.enable_root_user()",
"def set_user(self, user_model):\n\n self.user_model = user_model\n return self",
"def set_owner(plugin_id, username, logger, client):\n plugin = client.plugins.set_owner(plugin_id, username)\n logger.info('Plugin `%s` is now owned by user `%s`.',\n plugin_id, plugin.get('created_by'))",
"def is_owned_by(self, user):\n return user and user.id == self.user_id",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def add_owner(self, user):\n user_in = user.get_groups()\n member = False\n for group in user_in:\n if self.usergroup_node == group.usergroup_node:\n member = True\n ownership = Relationship(user.get(), 'owns', self.usergroup_node)\n graph.create(ownership)\n if not member:\n membership = Relationship(user.get(), 'in', self.usergroup_node)\n graph.create(membership)\n return self.usergroup_node",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active",
"def set_file_owner(host, fqpath, user):\n command = \"chown %s %s\" % (user, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chown failed: %s' % rerr)\n return False",
"def set_ownership(self):\n\n os.chmod(os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id), 0600)",
"def is_user_event(self, is_user_event):\n self._is_user_event = is_user_event",
"def get_owner(self):\n return self.__owner",
"def change_ownership(obj, userid):\n assert isinstance(userid, string_types)\n old_owner = obj.creators[0]\n if userid == old_owner:\n return\n #Remove Owner group from old owner\n obj.local_roles.remove(old_owner, ROLE_OWNER)\n #Add new owner\n obj.local_roles.add(userid, ROLE_OWNER)\n #Set new owner in creators attr - this will also trigger reindex catalog event so keep it last!\n obj.set_field_appstruct({'creators': (userid,)})\n return userid",
"def possessed_by(self, other):\r\n self.owner = other",
"def get_owner(self):\n return self.settings.get(\"owner\", None)",
"def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])",
"def user_can_edit(self, user):\n return user == self.owner",
"def set_main_user_options(self):\n if self.user:\n self.template_values[self._tp_logoutLink] = True\n self.template_values[self._tp_userName] = self.user.username\n else:\n self.template_values[self._tp_loginLink] = True\n self.template_values[self._tp_signupLink] = True",
"def is_current_session_owner(self):\n\t\treturn bool(call_sdk_function('PrlAcl_IsCurrentSessionOwner', self.handle))",
"def is_usermanager(self):\n return False",
"def as_user_to_self(self, as_user_to_self):\n\n self._as_user_to_self = as_user_to_self",
"def git_username_user_attribute(self, git_username_user_attribute):\n self._git_username_user_attribute = git_username_user_attribute",
"def CAN_ASSIGN_OWNER(article, user): # pylint: disable=invalid-name\r\n return _is_staff_for_article(article, user)",
"def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")",
"def check_owner(data=None, **kw):\n if data and 'owner_id' in data and not data['owner_id'] == current_user.id:\n raise ProcessingException(description=\"No write privileges\",\n code=401)",
"def manage_changeOwnershipType(\n self,\n explicit=1,\n RESPONSE=None,\n REQUEST=None\n ):\n old = getattr(self, '_owner', None)\n if explicit:\n if old is not None:\n return\n owner = self.getOwnerTuple()\n if owner is not None and owner is not UnownableOwner:\n self._owner = owner\n else:\n if old is None:\n return\n new = aq_get(aq_parent(self), '_owner', None, 1)\n _m = object()\n if old is new and (self.__dict__.get('_owner', _m) is not _m):\n del self._owner\n\n if RESPONSE is not None:\n RESPONSE.redirect(REQUEST['HTTP_REFERER'])",
"def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)",
"def which_owner(self):\n LOGGER.debug(self.details)\n for override_function, override_map in self.team_owner_overrides.items():\n for override_key, override_team in override_map.items():\n if override_key in self.details[override_function]: # pylint: disable=unsupported-membership-test\n self.details[\"owner\"] = override_team\n break\n\n # for chassis and blades in d42, use their role to determine ownership\n if re.match('^c[0-9]*b[0-9]*', self.details['function']) is not None:\n self.load_from_device42()\n\n if self.details[\"owner\"] == \"team-unclassified\":\n for owner, teamregex in self.team_ownership_regexes.items():\n if re.search(teamregex, self.details[\"function\"]):\n self.details[\"owner\"] = owner\n break",
"def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")",
"def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n return UserAS.objects.filter(as_ptr=obj).exists()",
"def set_owner_name(self, data, **kwargs):\n try:\n git_url = GitURL.parse(data[\"git_url\"])\n except UnicodeError as e:\n raise ValidationError(\"`git_url` contains unsupported characters\") from e\n except ConfigurationError as e:\n raise ValidationError(\"Invalid `git_url`\") from e\n\n if git_url.owner is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"owner\"] = git_url.owner\n\n if git_url.name is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"name\"] = git_url.name\n data[\"slug\"] = normalize_to_ascii(data[\"name\"])\n\n return data",
"def block_owner_deletion(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"block_owner_deletion\")",
"def test__put_owner_into():\n user = User.precreate(202211270016)\n team = Team.precreate(202211270017)\n \n for input_value, defaults, expected_output in (\n (ZEROUSER, False, {}),\n (ZEROUSER, True, {'owner': None, 'team': None}),\n (user, True, {'owner': user.to_data(defaults = True, include_internals = True), 'team': None}),\n (team, True, {'owner': team.to_data_user(), 'team': team.to_data(defaults = True, include_internals = True)}),\n ):\n output = put_owner_into(input_value, {}, defaults)\n vampytest.assert_eq(output, expected_output)",
"def form_valid(self, form):\n self.handle_balance_update(form)\n\n form.instance.owner = self.request.user\n return super().form_valid(form)",
"def manage_owner(owner_id):\n\n return _get_owner_service().get_owner(owner_id)",
"def set_as_type_user(self):\n self.type = MessageTypes.USER",
"def set_username(self, value):\n raise NotImplementedError('set_username')",
"def is_admin(self, is_admin):\n\n self._is_admin = is_admin",
"def set_owner(self, role, check_mode=True):\n query = 'ALTER SUBSCRIPTION %s OWNER TO \"%s\"' % (self.name, role)\n return self.__exec_sql(query, check_mode=check_mode)",
"def owner_id(self) -> int:\n return self.proto.owner",
"def setCurrentUser(self, provider):\n pass",
"def owner(self) -> str:\n return self._owner",
"def getOwner(self):\n return self.__owner",
"def getOwner(self):\n return self.__owner"
] |
[
"0.6996318",
"0.69127315",
"0.6491776",
"0.64011246",
"0.63992107",
"0.63773704",
"0.636248",
"0.63547474",
"0.63318616",
"0.6193567",
"0.6193567",
"0.6193567",
"0.6193567",
"0.6184652",
"0.61401767",
"0.61401767",
"0.61401767",
"0.61401767",
"0.6025569",
"0.59794116",
"0.5975996",
"0.5959993",
"0.59532636",
"0.5921516",
"0.5841493",
"0.5720495",
"0.5709794",
"0.56894433",
"0.5669848",
"0.55898285",
"0.5524713",
"0.5308064",
"0.52940637",
"0.527658",
"0.5250455",
"0.5245117",
"0.5224063",
"0.52238196",
"0.5204466",
"0.5199849",
"0.5144426",
"0.51237136",
"0.51109666",
"0.51104283",
"0.50826746",
"0.50552654",
"0.5053018",
"0.5043372",
"0.50413716",
"0.50360423",
"0.50198525",
"0.49962312",
"0.49961227",
"0.499224",
"0.49857652",
"0.496023",
"0.49303004",
"0.49272606",
"0.49164817",
"0.49146286",
"0.49102524",
"0.48703888",
"0.48641297",
"0.48548356",
"0.4851623",
"0.48453927",
"0.48416826",
"0.48325476",
"0.48066133",
"0.48049283",
"0.48015317",
"0.4793861",
"0.47816345",
"0.47668082",
"0.47652966",
"0.4760426",
"0.47190773",
"0.47133267",
"0.47053608",
"0.47052795",
"0.4702698",
"0.46895292",
"0.46888348",
"0.4673089",
"0.46728444",
"0.46696627",
"0.46643227",
"0.46639714",
"0.46618327",
"0.46568877",
"0.46490335",
"0.46456543",
"0.46400103",
"0.46262342",
"0.46239883",
"0.46231693",
"0.46223533",
"0.46114004",
"0.46077585",
"0.46077585"
] |
0.8064309
|
0
|
Gets the is_billing_admin of this UserBase.
|
def is_billing_admin(self):
    return self._is_billing_admin
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_billing_admin(self, is_billing_admin):\n\n self._is_billing_admin = is_billing_admin",
"def is_admin(self):\n return self.admin",
"def is_admin(self):\r\n return self.admin",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def is_admin(self) -> bool:\n return self._is_admin",
"def is_admin(self):\n return self._is_admin",
"def is_admin(self):\n return self._is_admin",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def admin(self):\n if self.is_admin:\n return True\n return False",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def billing(self):\n return self._billing",
"def check_is_admin(current_user):\n return current_user['isAdmin'] == True",
"def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False",
"def default_billing(self):\n return self._default_billing",
"def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False",
"def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False",
"def is_superuser(self):\n return self.is_admin",
"def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )",
"def is_admin(self):\n return False",
"def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False",
"def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False",
"def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')",
"def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]",
"def is_admin(self, user):\n return user.name in self.admins",
"def is_user_admin(request):\n return request.user.is_superuser",
"def user_is_admin(user):\n return user in admins",
"def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1",
"def is_user_admin(self, user):\n return user == self.created_by",
"def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"",
"def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True",
"def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")",
"def billing_info(self):\n return self._billing_info",
"def isAdmin(user):\n return isUserType(user, Admin)",
"def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin",
"def is_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n return 'Yes, you are admin'\n else:\n return \"No, you don't admin\"\n else:\n return \"You not logged in\"",
"def is_not_admin(user):\n return not user.is_superuser",
"def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'",
"def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'",
"def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin",
"def is_administrator(self):\n return self.can(Permission.ADMIN)",
"def is_administrator(self):\n return self.can(Permission.ADMIN)",
"def is_admin(self):\n return Role.query.get(2) in self.roles",
"def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_group_admin_group(self):\n groups = self['__store']\n return self.group_id in groups.get_group_admin_group_ids()",
"def supports_type_admin(self):\n return 'supports_type_admin' in profile.SUPPORTS",
"def IsCorpUserOrAdmin(self):\n user_email = auth_util.GetUserEmail()\n return ((user_email and user_email.endswith('@google.com')) or\n auth_util.IsCurrentUserAdmin())",
"def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'",
"def is_admin(self, is_admin):\n\n self._is_admin = is_admin",
"def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False",
"def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0",
"def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)",
"def get_is_admin():\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except:\n return \"Could not get the UAC level.\"",
"def is_admin():\n # type: () -> bool\n current_os_name = os.name\n\n # Works with XP SP2 +\n if current_os_name == \"nt\":\n try:\n return IsUserAnAdmin()\n except Exception:\n raise EnvironmentError(\"Cannot check admin privileges\")\n elif current_os_name == \"posix\":\n # Check for root on Posix\n # os.getuid only exists on postix OSes\n # pylint: disable=E1101 (no-member)\n return os.getuid() == 0\n else:\n raise EnvironmentError(\n \"OS does not seem to be supported for admin check. OS: {}\".format(\n current_os_name\n )\n )",
"def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)",
"def check_admin_session(self):\n for session in vms.get_vm_sessions(vm_name=self.vm_name):\n if (\n session.get_console_user()\n and\n session.get_user().get_user_name().startswith(\"admin\")\n ):\n return True\n return False",
"def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role",
"def check_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n\n return md5((str(id) + \"admin\").encode()).hexdigest() in self.__admins",
"def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'",
"def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff",
"def admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True",
"def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)",
"def is_customer(self):\n return self.user_type == 'C'",
"def is_staff(self):\n return self.is_admin",
"def is_staff(self):\n return self.is_admin",
"def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone",
"def is_staff(self):\r\n return self.is_admin",
"def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)",
"def is_administrator(self):\n return False",
"def _get_admin_status(self):\n return self.__admin_status",
"def isAdmin(self, nick):\n\t\tif nick in self.config[\"admins\"]:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False",
"def aof_backup_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"aof_backup_enabled\")",
"def is_staff(self) -> bool:\n return self.is_admin",
"def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")",
"def billing_account(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"billing_account\")",
"def is_admin(username: str) -> bool:\n db = get_db()\n return int(db.get_user_by_name(username)[\"is_admin\"]) == 1",
"def get_billing_state(self):\n if self.billing_address and self.billing_address.state:\n return self.billing_address.state\n else:\n sub_prods = SubscriptionProduct.objects.filter(subscription=self)\n addresses = [sp.address for sp in sub_prods]\n if addresses:\n return addresses[0].state\n else:\n return \"\"",
"def getAdminLevel(self):\n return self.__adminLevel",
"def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))",
"def get_editable(self, user):\n return user.get('role') == 'admin'",
"def is_admin(user):\n return get_organisations_as_admin(user).count() > 0",
"def user_allow_credit(self):\n try:\n return self.user.creditAllowed()\n except AttributeError:\n return False",
"def billing_contact(self):\n return self._billing_contact",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def can_save(self, user_obj):\n if user_obj.is_superuser:\n return True\n elif self.parentnode:\n return self.parentnode.is_admin(user_obj)\n else:\n return False",
"def user_is_periodadmin(userobj):\n from .period import Period\n return user_is_basenodeadmin(userobj, Period)",
"def get_billing_name(self):\n if self.billing_name:\n return self.billing_name\n else:\n return self.contact.name",
"def last_active_admin(self):\n number = User.objects.filter(role=User.ROLE_ADMIN,\n is_active=True).count()\n if number > 1:\n return False\n else:\n return True",
"def is_admin(user):\n return user.groups.filter(name='Profesores').exists()",
"def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)",
"def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")",
"def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")",
"def billing_account(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"billing_account\")",
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def is_admin(member: Union[discord.Member, discord.User]) -> bool:\n if not isinstance(member, discord.Member):\n return False\n return any([role.id in config.BOT_ADMIN_ROLES for role in member.roles])",
"def is_admin(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {\"username\":username})\n res = cur.fetchone()\n if res[5].lower() == 'admin':\n return True\n return False",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def user_is_nodeadmin(userobj):\n from .node import Node\n return user_is_basenodeadmin(userobj, Node)"
] |
[
"0.75745505",
"0.6962725",
"0.6885912",
"0.6775686",
"0.67065483",
"0.6678298",
"0.6678298",
"0.66013443",
"0.6554211",
"0.6432071",
"0.6286309",
"0.6203971",
"0.61566216",
"0.6140026",
"0.61183566",
"0.60726625",
"0.6026393",
"0.6017969",
"0.59559476",
"0.59489256",
"0.59279305",
"0.5905573",
"0.5896796",
"0.58894575",
"0.58808905",
"0.5857964",
"0.57998943",
"0.57950556",
"0.57774943",
"0.57565796",
"0.57201725",
"0.5704812",
"0.5687596",
"0.56330943",
"0.56149924",
"0.5608429",
"0.5607708",
"0.5594533",
"0.55827296",
"0.55827296",
"0.55215824",
"0.55154854",
"0.55154854",
"0.5488341",
"0.5471663",
"0.54676867",
"0.5467015",
"0.5458316",
"0.5451712",
"0.5450482",
"0.54457253",
"0.5438174",
"0.53899056",
"0.5385222",
"0.53830814",
"0.5335984",
"0.5321955",
"0.53089124",
"0.529626",
"0.528767",
"0.5267025",
"0.5245355",
"0.5244483",
"0.5243691",
"0.523471",
"0.523471",
"0.52315676",
"0.5230993",
"0.5230514",
"0.5218111",
"0.52083856",
"0.520562",
"0.51873475",
"0.5183856",
"0.5182994",
"0.5182994",
"0.5176955",
"0.51729465",
"0.51703227",
"0.5158832",
"0.51506",
"0.5132513",
"0.5126795",
"0.5122662",
"0.5115947",
"0.5115947",
"0.51142716",
"0.5105315",
"0.510328",
"0.510286",
"0.50933534",
"0.50766015",
"0.50766015",
"0.50766015",
"0.50278527",
"0.50247705",
"0.5023307",
"0.5008781",
"0.5008781",
"0.50085634"
] |
0.8872368
|
0
|
Sets the is_billing_admin of this UserBase.
|
def is_billing_admin(self, is_billing_admin):
    self._is_billing_admin = is_billing_admin
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_billing_admin(self):\n return self._is_billing_admin",
"def is_admin(self, is_admin):\n\n self._is_admin = is_admin",
"def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin",
"def default_billing(self, default_billing):\n\n self._default_billing = default_billing",
"def billing(self, billing):\n\n self._billing = billing",
"def super_admin(self, super_admin):\n\n self._super_admin = super_admin",
"def promote(self):\n if self.is_admin == True:\n pass\n self.is_admin = True\n User.save(self)",
"def is_admin(self):\n return self.admin",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def set_admin(self, admins):\n self.set_group(self._gp_admin_name, admins)",
"def admin_email(self, admin_email):\n\n self._admin_email = admin_email",
"def is_admin(self):\r\n return self.admin",
"def default_billing(self):\n return self._default_billing",
"def is_admin(self) -> bool:\n return self._is_admin",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def billing_info(self, billing_info):\n\n self._billing_info = billing_info",
"def admin(self):\n if self.is_admin:\n return True\n return False",
"def put(self, user_id):\n self.conn = pecan.request.db_conn\n self.conn.change_billing_owner(request.context,\n project_id=self.project_id,\n user_id=user_id)",
"def is_admin(self):\n return self._is_admin",
"def is_admin(self):\n return self._is_admin",
"def admin_host(self, admin_host):\n\n self._admin_host = admin_host",
"def billing_currency(self, billing_currency):\n\n self._billing_currency = billing_currency",
"def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False",
"def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False",
"def invalid_admin_state(isadmin):\n if isinstance(isadmin, bool):\n return False\n return True",
"def billing(self):\n return self._billing",
"def is_admin(self):\n return False",
"def is_taxable(self, is_taxable: bool):\n if is_taxable is None:\n raise ValueError(\"Invalid value for `is_taxable`, must not be `None`\") # noqa: E501\n\n self._is_taxable = is_taxable",
"def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield",
"def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)",
"def billing_contact(self, billing_contact):\n\n self._billing_contact = billing_contact",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def set_cuda(self, is_cuda):\n self.is_cuda = is_cuda",
"def promote_user(self, username):\n parser_promote.add_argument('isadmin', choices=[\"True\", \"False\"],\n required=True, nullable=False,\n help=\"(Accepted values: True, False)\"\n )\n args = parser_promote.parse_args()\n isAdmin = request.json.get('isadmin')\n\n query = \"\"\"UPDATE users SET isadmin=%s WHERE username=%s\"\"\"\n values = isAdmin, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True",
"def is_guest(self, is_guest):\n\n self._is_guest = is_guest",
"def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False",
"def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False",
"def admin_state_up(self, state):\n self.neutron.update_port(self._provision_port_id, {'port': {'admin_state_up': state}})",
"def check_is_admin(current_user):\n return current_user['isAdmin'] == True",
"def set_admin_password(self, instance, new_pass):\n pass",
"def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True",
"def _set_admin_status(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"admin-status\", rest_name=\"admin-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"admin_status must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"admin-status\", rest_name=\"admin-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__admin_status = t\n if hasattr(self, '_set'):\n self._set()",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def setAllowDelete(self, value, **kwa):\n\n if type(value) == BooleanType:\n self.fgField.allow_delete = value\n else:\n self.fgField.allow_delete = value == '1'",
"def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )",
"def up(self):\n self.update(admin_state='1')",
"def add_admin(user):\n _add_owner(\n _lookup_user(user).biv_id,\n _add_model(pam.Admin())\n )",
"def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False",
"def is_admin(self, user):\n return user.name in self.admins",
"def SetCurrentUser(self, email, user_id='123456', is_admin=False):\n email = email or ''\n user_id = user_id or ''\n is_admin = '1' if is_admin else '0'\n self.testbed.setup_env(user_is_admin=is_admin,\n user_email=email,\n user_id=user_id,\n overwrite=True)",
"def set_protection_enabled(self, c, state):\n self.enable_protection = state",
"def is_superuser(self):\n return self.is_admin",
"def setGateway(self):\n\t\tself.gatewayip = self.settings.getKeyValue('gatewayip')\n\t\tself.socket.send('setenv gatewayip ' + self.gatewayip+'\\r', 1)\n\t\treturn None",
"def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()",
"def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')",
"def need_admin_approval(self, need_admin_approval):\n\n self._need_admin_approval = need_admin_approval",
"def test_admin(self):\r\n \r\n self.assertEqual(False, self.user.isAdmin)",
"def setGateway(self, gateway):\n # type: (str)->None\n\n self._validator.validate_one(\n 'gateway', VALID_OPTS['gateway'], gateway)\n self._ifAttributes['gateway'] = gateway",
"def _reset_admin(self):\r\n DBSession.execute(\r\n \"UPDATE users SET activated='1' WHERE username='admin';\")\r\n Activation.query.delete()\r\n transaction.commit()",
"def allow_purchase_order(self, allow_purchase_order):\n\n self._allow_purchase_order = allow_purchase_order",
"def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")",
"def __add_admin(self):\n log.debug(\"Displaying __add_admin\")\n # Let the admin select an administrator to promote\n user = self.__user_select()\n # Allow the cancellation of the operation\n if isinstance(user, CancelSignal):\n return\n # Check if the user is already an administrator\n admin = self.session.query(db.Admin).filter_by(user_id=user.user_id).one_or_none()\n if admin is None:\n # Create the keyboard to be sent\n keyboard = telegram.ReplyKeyboardMarkup([[self.loc.get(\"emoji_yes\"), self.loc.get(\"emoji_no\")]],\n one_time_keyboard=True)\n # Ask for confirmation\n self.bot.send_message(self.chat.id, self.loc.get(\"conversation_confirm_admin_promotion\"),\n reply_markup=keyboard)\n # Wait for an answer\n selection = self.__wait_for_specific_message([self.loc.get(\"emoji_yes\"), self.loc.get(\"emoji_no\")])\n # Proceed only if the answer is yes\n if selection == self.loc.get(\"emoji_no\"):\n return\n # Create a new admin\n admin = db.Admin(user=user,\n edit_products=False,\n receive_orders=False,\n create_transactions=False,\n is_owner=False,\n display_on_help=False)\n self.session.add(admin)\n # Send the empty admin message and record the id\n message = self.bot.send_message(self.chat.id, self.loc.get(\"admin_properties\", name=str(admin.user)))\n # Start accepting edits\n while True:\n # Create the inline keyboard with the admin status\n inline_keyboard = telegram.InlineKeyboardMarkup([\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.edit_products)} {self.loc.get('prop_edit_products')}\",\n callback_data=\"toggle_edit_products\"\n )],\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.receive_orders)} {self.loc.get('prop_receive_orders')}\",\n callback_data=\"toggle_receive_orders\"\n )],\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.create_transactions)} {self.loc.get('prop_create_transactions')}\",\n callback_data=\"toggle_create_transactions\"\n )],\n [telegram.InlineKeyboardButton(\n f\"{self.loc.boolmoji(admin.display_on_help)} {self.loc.get('prop_display_on_help')}\",\n callback_data=\"toggle_display_on_help\"\n )],\n [telegram.InlineKeyboardButton(\n self.loc.get('menu_done'),\n callback_data=\"cmd_done\"\n )]\n ])\n # Update the inline keyboard\n self.bot.edit_message_reply_markup(message_id=message.message_id,\n chat_id=self.chat.id,\n reply_markup=inline_keyboard)\n # Wait for an user answer\n callback = self.__wait_for_inlinekeyboard_callback()\n # Toggle the correct property\n if callback.data == \"toggle_edit_products\":\n admin.edit_products = not admin.edit_products\n elif callback.data == \"toggle_receive_orders\":\n admin.receive_orders = not admin.receive_orders\n elif callback.data == \"toggle_create_transactions\":\n admin.create_transactions = not admin.create_transactions\n elif callback.data == \"toggle_display_on_help\":\n admin.display_on_help = not admin.display_on_help\n elif callback.data == \"cmd_done\":\n break\n self.session.commit()",
"def is_user_admin(self, user):\n return user == self.created_by",
"def set_admins(self, admin_emails):\n self.login('superadmin@example.com', is_super_admin=True)\n response = self.testapp.get('/admin')\n csrf_token = self.get_csrf_token_from_response(response)\n self.post_json('/adminhandler', {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.ADMIN_EMAILS.name: admin_emails,\n }\n }, csrf_token)\n self.logout()",
"def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1",
"def free_shipping_minimum(self, free_shipping_minimum):\n\n self._free_shipping_minimum = free_shipping_minimum",
"def Build(self,admin): \n\n rv=admin.helper.setUser(self.name,self.__encryptPwd.decode())\n if rv is None:\n return False\n else:\n rv=admin.helper.setAccount(self.name,'ARS')\n if rv is None:\n return False\n else:\n return True",
"def allocSetBillable(alloc, is_billable):\n return Cuebot.getStub('allocation').SetBillable(\n facility_pb2.AllocSetBillableRequest(allocation=alloc, value=is_billable),\n timeout=Cuebot.Timeout)",
"def create_admin():\n admin = models.User(username= 'gallery_admin', email='galleryblockchain@gmail.com', address='#0000' , password =bcrypt.generate_password_hash('toledano',\n current_app.config.get('BCRYPT_LOG_ROUNDS')).decode('utf-8'), admin=True)\n admin.save()",
"def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"",
"def is_user_admin(request):\n return request.user.is_superuser",
"def disableBilling(projectID):\n credentials = compute_engine.Credentials()\n\n # Using Python Google API Client Library to construct a Resource object for interacting with an API\n # The name and the version of the API to use can be found here https://developers.google.com/api-client-library/python/apis/\n billing_service = discovery.build('cloudbilling', 'v1', credentials=credentials, cache_discovery=False)\n\n # https://developers.google.com/resources/api-libraries/documentation/cloudbilling/v1/python/latest/cloudbilling_v1.projects.html#getBillingInfo\n billing_info = billing_service.projects().getBillingInfo(name='projects/{}'.format(projectID)).execute()\n if not billing_info or 'billingEnabled' not in billing_info: #billing has already been disabled\n return False\n else: #billing isn't disabled\n #Change to a blank billing account -- disables billing\n billing_info = billing_service.projects().updateBillingInfo(\n name='projects/{}'.format(projectID),\n body={'billingAccountName': ''}\n ).execute()\n logging.info('Disabled billing for {}'.format(projectID)) \n return True",
"def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)",
"def setAllowInsert(self, value, **kwa):\n\n if type(value) == BooleanType:\n self.fgField.allow_insert = value\n else:\n self.fgField.allow_insert = value == '1'",
"def promote_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(id) + \"admin\").encode()).hexdigest()\n curs.execute(\"INSERT INTO admins(id) VALUES(?)\", (encrypted_id,))\n header.commit()\n self.__update_admin_cache()",
"def Build(self,admin):\n \n rv=admin.helper.setAccount(admin.userName,self.currency)\n if rv is None:\n return False\n else:\n return True",
"def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def check_admin():\n if not current_user.is_admin:\n abort(403)",
"def supports_type_admin(self):\n return 'supports_type_admin' in profile.SUPPORTS",
"def get_billing_phone(self):\n if self.billing_phone:\n return self.billing_phone\n else:\n return self.contact.phone",
"def setUpClass(cls, user=''):\n super().setUpClass(first_admin)",
"def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)",
"def make_donor(self):\n self.user.is_staff = False\n self.user.is_superuser = False\n self.user.groups.remove(get_group_by_name(self.ADMIN_GROUP))\n self.user.groups.remove(get_group_by_name(self.AMBASSADOR_GROUP))\n self.user.save()",
"def make_admin(self):\n user_datastore = SQLAlchemyUserDatastore(db, User, Role)\n user_datastore.add_role_to_user(self, 'admin')\n db.session.commit()",
"def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)",
"def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)",
"def tenant_user_admin(db) -> TenantUser:\n with schema_context('public'):\n return TenantUser.objects.create_superuser(\n _USER_PASS,\n email='super@user.com',\n )",
"def promote_user(username):\n user = User.get_user_by_username(username)\n user.is_admin = True\n user.save()",
"def user_is_basenodeadmin(userobj, *basenode_modelsclasses):\n for cls in basenode_modelsclasses:\n if cls.objects.filter(admins__id=userobj.id).exists():\n return True\n return False",
"def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active",
"def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)",
"def allow_3rd_party_billing(self, allow_3rd_party_billing):\n\n self._allow_3rd_party_billing = allow_3rd_party_billing",
"async def assert_user_is_admin(auth: Auth, requester: Requester) -> None:\n is_admin = await auth.is_server_admin(requester)\n if not is_admin:\n raise AuthError(HTTPStatus.FORBIDDEN, \"You are not a server admin\")",
"def is_acd(self, is_acd):\n \n self._is_acd = is_acd",
"def manual_tax_type(self, manual_tax_type):\n\n self._manual_tax_type = manual_tax_type",
"def set_all_ports_admin_disabled(self):\n pass",
"def clean(self, *args, **kwargs):\n\n super(AuthenticateAdminForm, self).clean(*args, **kwargs)\n\n if self.user_cache is not None and not self.user_cache.is_staff:\n raise forms.ValidationError(_('User is not admin'))\n\n return self.cleaned_data"
] |
[
"0.737039",
"0.6722866",
"0.6429481",
"0.59705704",
"0.5679304",
"0.5440798",
"0.5371101",
"0.52895933",
"0.52759075",
"0.52700037",
"0.52601206",
"0.5256608",
"0.5244197",
"0.5224175",
"0.517771",
"0.51590234",
"0.5110502",
"0.50515014",
"0.503452",
"0.503452",
"0.50335866",
"0.5030447",
"0.498472",
"0.4955349",
"0.49360868",
"0.49040446",
"0.48618492",
"0.48221278",
"0.48101145",
"0.4800235",
"0.47561318",
"0.4749647",
"0.47283855",
"0.47181082",
"0.4714194",
"0.47044584",
"0.46892405",
"0.46793258",
"0.46673325",
"0.46382117",
"0.46057075",
"0.45959383",
"0.45734802",
"0.45605156",
"0.45594925",
"0.45503876",
"0.454579",
"0.45261988",
"0.44993463",
"0.44961554",
"0.44768515",
"0.44758865",
"0.44743684",
"0.44686788",
"0.44627565",
"0.44586015",
"0.4454763",
"0.44520172",
"0.44497398",
"0.44436714",
"0.44230396",
"0.44145662",
"0.44012728",
"0.4390473",
"0.43904525",
"0.43858835",
"0.4376425",
"0.43712974",
"0.4296701",
"0.42882016",
"0.4284634",
"0.42624483",
"0.4257925",
"0.4251526",
"0.42515117",
"0.42454645",
"0.42391384",
"0.4230829",
"0.42295024",
"0.42288923",
"0.42288923",
"0.42250252",
"0.42234468",
"0.42211905",
"0.4216693",
"0.42151472",
"0.42019862",
"0.42004135",
"0.4199786",
"0.41744325",
"0.41736758",
"0.41688153",
"0.41672206",
"0.4163687",
"0.41630697",
"0.41599533",
"0.41548556",
"0.41537383",
"0.41510993",
"0.41497627"
] |
0.8755264
|
0
|
Gets the role of this UserBase.
|
def role(self):
    return self._role
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_role(self):\n return self.role",
"def role(self):\n\n return self._role",
"def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')",
"def role(self) -> str:\n return pulumi.get(self, \"role\")",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")",
"def _get_role(self):\n return self.__role",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')",
"def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()",
"def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')",
"def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")",
"def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")",
"def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")",
"def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")",
"def getRole(self):\n return _libsbml.ReferenceGlyph_getRole(self)",
"def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]",
"def get(self):\n return self._roles.get(self._id)",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> \"Role\":\n return Role(connection=self)",
"def role_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role_id\")",
"def role(self, verify_key: VerifyKey) -> Dict[Any, Any]:\n user = self.get_user(verify_key)\n return user.role",
"def get_role(self, name):\n role = Role.query.filter_by(name=name).first()\n\n return role",
"def get(self, id):\n return Role.query.filter(Role.id == id).one()",
"def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")",
"def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover",
"def role(self):\n try:\n self._role = c_char(self.lib.iperf_get_test_role(self._test)).value.decode('utf-8')\n except TypeError:\n self._role = c_char(chr(self.lib.iperf_get_test_role(self._test))).value.decode('utf-8')\n return self._role",
"def getRole(self, node):\n info = self.getNode(node, includeDevices=False)\n if info is None:\n self.log.error(\"could not get role because '%s' does not exist\", node)\n return None\n return info.role",
"def getRole(self):\n return _libsbml.SpeciesReferenceGlyph_getRole(self)",
"def get(self, role_id):\n return self.client.get_role(role_id)",
"def role(self) -> GameRole:\n pass",
"def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")",
"def get_user_role():\n\n if session['user_role'] == 'student':\n return student\n elif session['user_role'] == 'tutor':\n return tutor\n else:\n raise Exception(\"User is not student or tutor. Who is user?\")",
"def highest_role(self):\n roles = ['superuser', 'staff', 'member', 'user', 'disabled']\n for role in roles:\n if role in self.roles():\n return role",
"def get_role(self, role_id: int, /) -> Optional[Role]:\n return self.guild.get_role(role_id) if self._roles.has(role_id) else None",
"def role_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_id\")",
"def role_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"role_arn\")",
"def get_user_role(cls, community_id, account_id):\n\n return DB.query_one(\"\"\"SELECT role_id FROM hive_roles\n WHERE community_id = :community_id\n AND account_id = :account_id\n LIMIT 1\"\"\",\n community_id=community_id,\n account_id=account_id) or Role.guest.value",
"def get(self, uuid):\n logger.info(\"Get a specific role by Id\", data=uuid)\n\n role = Role.query.get(uuid)\n return role_schema.jsonify(role)",
"def get_human_role(self):\n return UnitMembershipRole(self.role).label",
"def role_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_id\")",
"def get_role(role_id: int) -> Optional[Role]:\n return db.session.query(Role).get(role_id)",
"def getSelfRole(store):\n return getAccountRole(store, userbase.getAccountNames(store))",
"def get_cached_role(self):\n cache = self.get_cache()\n if cache.disabled:\n return self\n roles = cache.get(self.ROLES_BY_ID)\n if roles is None or self.id not in roles:\n self.update_cache()\n roles = cache.get(self.ROLES_BY_ID)\n return roles.get(self.id, self)",
"def get_role(self, role_name):\n role_record = self.list_roles(('name', role_name))\n if len(role_record) < 1:\n raise Exception('Role \\'%s\\' does not exist.' % role_name)\n return role_record[0]",
"def get_user_role_by_id(self, user_id):\n try:\n role = self.db_handler.get_user_role_by_id(user_id)\n\n self.logger.write_to_log('got user role information', user_id)\n\n return role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def single_role(self):\n return None",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")",
"def get_this_party_role(self):\n this_party_idx = self._all_parties.index(self._this_party)\n this_party_role = self._parameter.task_role[this_party_idx]\n return this_party_role",
"def token_role(self, role):\n return self.read('auth/token/roles/{0}'.format(role))",
"def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles",
"def get_role(self):\n memberships = Membership.objects.filter(person = self, entity__abstract_entity = False, importance_to_person__gte = 2).order_by('-importance_to_person')\n if memberships:\n return memberships[0]\n else: # the poor person had no memberships\n return None",
"def iam_role_arn(self) -> str:\n return pulumi.get(self, \"iam_role_arn\")",
"def target_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"target_role\")",
"def getRole(self, desired=None):\n strDes = str(desired)\n logging.debug(\"[LaymanAuthLiferay][getRole]: '%s'\"%strDes)\n if not self.authorised:\n logging.error(\"[LaymanAuthLiferay][getRole] The user is not authorised\")\n raise AuthError(401, \"I am sorry, but you are not authorised\")\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"roles\"]:\n roles = self.authJson[\"userInfo\"][\"roles\"]\n if len(roles) < 1:\n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay provided empty list of roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay provided empty list of roles\") \n\n theRole = roles[0]\n for r in roles:\n if desired == r[\"roleName\"]:\n theRole = r\n\n #lower and spaces\n #theRole[\"roleName\"] = theRole[\"roleName\"].lower()\n #theRole[\"roleName\"] = \"_\".join(theRole[\"roleName\"].split(' '))\n roleName = theRole[\"roleName\"]\n logging.debug(\"[LaymanAuthLiferay][getRole] The role: '%s'\"% roleName)\n return theRole\n else: \n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay did not provide user's roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay did not provide user's roles\")",
"async def get_role(self, guild: discord.Guild, create: bool = False, updatedb: bool = True) -> discord.Role | None:\n # Create role if necessary or return None since no role id\n if self.role is None:\n return await self.create_role(guild, updatedb=updatedb) if create else None\n\n # Try to find role in cache\n if not (role := guild.get_role(self.role)):\n return await self.create_role(guild, updatedb=updatedb) if create else None\n return role",
"def get_user_roles(self):\n url = 'userroles'\n result = self.get(url)\n return result.get('userroles', result)",
"def get_task_role(self):\n self.fix_arguments()\n if self.task_role is not None:\n comp = self.task_role\n elif self._model_instance:\n #fetch the default task role for the entire model\n #this can raise an exception if there isn't a\n #default task role defined for the model\n comp = self._model_instance.get_task_role()\n else:\n raise ConfigException(\"Can't find a task role for task {}\".format(self.name))\n return comp",
"def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")",
"def getRoleString(self):\n return _libsbml.SpeciesReferenceGlyph_getRoleString(self)",
"def service_role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_role\")",
"def get_roles(self):\n return [role.role_id for role in self.roles if role]",
"def get_role(resource_root, service_name, name, cluster_name=\"default\"):\n return _get_role(resource_root, _get_role_path(cluster_name, service_name, name))",
"def __repr__(self):\n return '<Role %r>' % self.name",
"def find_role(self, name):\n return self.get_session.query(self.role_model).filter_by(name=name).one_or_none()",
"def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")",
"def target_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_role\")",
"def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()",
"def get_computed_role(self, element):\n pass",
"def get_roles(self, include_remote=True):\n rbac_service = get_rbac_backend().get_service_class()\n result = rbac_service.get_roles_for_user(\n user_db=self, include_remote=include_remote\n )\n return result",
"def roles(self):\n return self._roles",
"def get_role(self, role_name):\n try:\n response = self._client.get_role(RoleName=role_name)\n except Exception as e:\n return False\n\n return response",
"def get_user_role(user, course_key):\r\n if is_masquerading_as_student(user):\r\n return 'student'\r\n elif has_access(user, 'instructor', course_key):\r\n return 'instructor'\r\n elif has_access(user, 'staff', course_key):\r\n return 'staff'\r\n else:\r\n return 'student'",
"def getRole(self, desired=None):\n return {\"roleName\":\"hasici\",\n \"roleTitle\":\"Soptici\"}",
"def service_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_role\")",
"def service_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_role\")",
"def __repr__(self):\n return '<Role({name})>'.format(name=self.name)",
"def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")",
"def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")",
"def listRoles(self):\n return self._client.listRoles()",
"def get_user_roles(user=None):\n if user is None:\n user = g.user\n return user.roles",
"def get_role_by_id(self, role_id):\n try:\n role = self.db_handler.get_role_by_id(role_id)\n\n self.logger.write_to_log('got role by id', 'model')\n return role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def getRoles(self):",
"def get(self, key: typing.Union[str, int], default: default_var=None) \\\n -> 'typing.Union[role.Role, default_var]':\n if isinstance(key, int):\n return self._roles.get(key, default)\n else:\n return self._get_by_name(key, default=default)",
"def get_task_role(self):\n if self.default_task_role is None and self.delegate is None:\n raise ConfigException(\"No default task role defined on the config model\")\n\n if self.namespace_model_instance is None:\n raise ConfigException(\"ConfigModel instance can't get a default task role from a Namespace model reference without an instance of that model\")\n \n comp_ref = self.namespace_model_instance.get_inst_ref(self.default_task_role)\n comp_ref.fix_arguments()\n return comp_ref.value()",
"def getRoleInfo(self, role_id):\n return self._roles[role_id]",
"def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")",
"def execution_role_arn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"execution_role_arn\")",
"def invocation_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"invocation_role\")",
"def action_role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get(\"action_role\")",
"def service_role(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_role\")",
"def get_roles():\n return config.get_cfg_storage(ID_ROLE)",
"def get_role(role_id):\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id))\n\treturn response.json()",
"async def role_from_id(self, guild: discord.Guild, role_id: int):\n\n return discord.utils.get(guild.roles, id=role_id)"
] |
[
"0.7832626",
"0.7500845",
"0.74306035",
"0.73451483",
"0.71249825",
"0.7112837",
"0.71025026",
"0.70645654",
"0.698936",
"0.6881144",
"0.67973137",
"0.6794101",
"0.6794101",
"0.6781229",
"0.67532885",
"0.6691753",
"0.6611382",
"0.6611382",
"0.6611382",
"0.653817",
"0.63703835",
"0.6368493",
"0.6365552",
"0.633757",
"0.6266957",
"0.6244703",
"0.62220556",
"0.6221027",
"0.6215984",
"0.61909956",
"0.6171645",
"0.6171013",
"0.6171013",
"0.6164465",
"0.6128886",
"0.60883397",
"0.6062568",
"0.6047627",
"0.6031953",
"0.5995377",
"0.5978126",
"0.5959352",
"0.59346175",
"0.5918326",
"0.58925605",
"0.58568555",
"0.5856385",
"0.5833792",
"0.5832623",
"0.5832623",
"0.5832623",
"0.5832623",
"0.5821134",
"0.5792492",
"0.57820743",
"0.57801473",
"0.5743632",
"0.572552",
"0.57254267",
"0.5706",
"0.5683953",
"0.56768966",
"0.5670724",
"0.56699246",
"0.5645992",
"0.5622642",
"0.56094676",
"0.5589733",
"0.5578023",
"0.5569525",
"0.5569525",
"0.5561355",
"0.55548143",
"0.553132",
"0.5505809",
"0.54952693",
"0.5456553",
"0.5420367",
"0.5419545",
"0.5419545",
"0.54143846",
"0.5410088",
"0.5410088",
"0.54090637",
"0.54080117",
"0.5388533",
"0.5381344",
"0.53790265",
"0.5370178",
"0.53563684",
"0.5328401",
"0.5328401",
"0.5316238",
"0.53149337",
"0.5299767",
"0.52893436",
"0.52886283",
"0.5284979"
] |
0.7420549
|
5
|
Sets the role of this UserBase.
|
def role(self, role):
    allowed_values = [100, 200, 300, 400, 600]  # noqa: E501
    if role not in allowed_values:
        raise ValueError(
            "Invalid value for `role` ({0}), must be one of {1}"
            .format(role, allowed_values)
        )
    self._role = role
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _set_role(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"role must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'4 .. 32']}), is_leaf=True, yang_name=\"role\", rest_name=\"role\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Role of the user'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__role = t\n if hasattr(self, '_set'):\n self._set()",
"def role(self, role):\n\n self._role = int(role)",
"def set_role(self, user, role):\n obj = self._get_through_object(user)\n obj.role = role if isinstance(role, int) else obj.ROLE_MAP_REV[role]\n obj.save()",
"def role(self, role):\n\n self._role = role",
"def role(self, role):\n\n self._role = role",
"def __setRole(self, session):\r\n self.__role = session.role\r\n if self._config.has_key('purpose'):\r\n co_role = ccm.get_role_for_purpose(session, self._config['purpose'])\r\n _logger.info(\"Switching user to role: %s\" % co_role)\r\n session.role = co_role\r\n _logger.info(\"Switched user to role: %s\" % session.role)",
"def set_role(userid, role, group, request=None):",
"def setRole(self, *args):\n return _libsbml.ReferenceGlyph_setRole(self, *args)",
"def set_role(self, group, role):\n self.permissions[group] = roles[role]",
"def define_role(self, role):\n\n self._db_manager.create_role(role)",
"def set_rights(self, user, role):\n for_user = User.get_user_by_username(user)\n role = UserRole(role)\n self.access_handler.check_set_rights(for_user, role)\n for_user.set_user_type(role)",
"def setRole(self, *args):\n return _libsbml.SpeciesReferenceGlyph_setRole(self, *args)",
"def set_task_role(self, task_role):\n self._task_role = task_role",
"def update_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover",
"def set_role(username, role_name=\"\"):\n\tsession = get_session()\n\tdata = {\"username\": username, \"role\": role_name}\n\tsession.post(\"{url}/api/users/set_role\".format(url=get_registry_url()), json=data)",
"def roles(self, roles):\n\n self._roles = roles",
"def roles(self, roles):\n\n self._roles = roles",
"def roles(self, roles):\n\n self._roles = roles",
"def role(self):\n\n return self._role",
"def update(self, role):\n self._router_request(\n self._make_request_data(\n 'updateAdminRole',\n data=dict(\n params=dict(\n uid=self.uid,\n name=self.name,\n role=role\n )\n )\n )\n )\n\n self.role = role\n\n return True",
"async def userrole(self, ctx, *, role=None):\n server = ctx.message.guild\n\n if not role:\n result = await self.bot.db.config.find_one({'_id': str(server.id)})\n if result and result.get('user_role'):\n await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \\n'\n f'The current user role is `{result.get(\"user_role\")}`. '\n f'To change it type `{result.get(\"prefix\")}userrole <role name>`')\n else:\n await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \\n'\n f'No user role set. '\n f'To set one type `{result.get(\"prefix\")}userrole <role name>`')\n elif role in [r.name for r in server.roles]:\n await self.bot.db.config.update_one({'_id': str(server.id)}, {'$set': {'user_role': str(role)}}, upsert=True)\n await ctx.send(f'Server role `{role}` can now create and manage their own polls.')\n else:\n await ctx.send(f'Server role `{role}` not found.')",
"def _overrideRole(self, newRole, args):\n oldRole = args.get('role', None)\n args['role'] = newRole\n return oldRole",
"def changeRole(self, node, role):",
"def change_project_role(self, project_id: int, role: str) -> None:\n\n session = create_session()\n session.execute(association_table_user_to_project.update().where(\n association_table_user_to_project.c.project_id == project_id).where(\n association_table_user_to_project.c.member_id == self.id).values(\n project_role=role))\n session.merge(self)\n session.commit()",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self) -> str:\n return pulumi.get(self, \"role\")",
"def role(self, roleSpec):\n self.sparkProperties[SparkProperties.SPARK_MESOS_ROLE] = roleSpec\n return self",
"def change_user_role(username, new_role):\n user_connector.change_user_role(username, new_role)",
"def get_role(self):\n return self.role",
"def update(self, role):\n model = models.load('Role', role)\n model.account_id = self.account_id\n\n return self.client.update_role(model)",
"async def save(self):\n await config.member(self.member).set_raw(str(self.role.id), value=self.as_dict)",
"def getUserRole(self):\n\n # general question concerning the user's role (hersteller, beauftragter...)\n self.roleView.getUserRole()",
"def __init__(self, role_name, org='', course_key=None):\r\n super(RoleBase, self).__init__()\r\n\r\n self.org = org\r\n self.course_key = course_key\r\n self._role_name = role_name",
"def setRole(self, room, nick, role):\n if role not in ('moderator', 'participant', 'visitor', 'none'):\n raise TypeError\n query = ET.Element('{http://jabber.org/protocol/muc#admin}query')\n item = ET.Element('item', {'role':role, 'nick':nick}) \n query.append(item)\n iq = self.xmpp.makeIqSet(query)\n iq['to'] = room\n result = iq.send()\n if result is False or result['type'] != 'result':\n raise ValueError\n return True",
"def can_set_role(userid, role, group):",
"def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')",
"def role(self) -> \"Role\":\n return Role(connection=self)",
"def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n\n # Set default role for a regular new User\n self.role = Role.query.filter_by(default=True).first()",
"def set_keystone_v3_role(self, role_id, role_new_name):\n LOG_OBJ.debug(\"Creating the role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _role_info = {\"role\": {\n \"name\": role_new_name}}\n _body = json.dumps(_role_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the role\")\n print (\"No response from Server while set the role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\" Set role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n return True",
"def create_role(self, **kwargs):\n role = self.role_model(**kwargs)\n # noinspection PyUnresolvedReferences\n return self.save(role)",
"def set_task_role(self, task_role):\n if not isinstance(task_role, AbstractModelReference):\n raise ConfigException(\"A default task role was supplied that isn't some kind of model reference: %s\" %\n str(task_role))\n self.default_task_role = task_role",
"def _override_role(self, test_obj, toggle_rbac_role=False):\n self.user_id = test_obj.os_primary.credentials.user_id\n self.project_id = test_obj.os_primary.credentials.tenant_id\n self.token = test_obj.os_primary.auth_provider.get_token()\n\n LOG.debug('Overriding role to: %s.', toggle_rbac_role)\n role_already_present = False\n\n try:\n if not all([self.admin_role_id, self.rbac_role_id]):\n self._get_roles_by_name()\n\n target_role = (\n self.rbac_role_id if toggle_rbac_role else self.admin_role_id)\n role_already_present = self._list_and_clear_user_roles_on_project(\n target_role)\n\n # Do not override roles if `target_role` already exists.\n if not role_already_present:\n self._create_user_role_on_project(target_role)\n except Exception as exp:\n with excutils.save_and_reraise_exception():\n LOG.exception(exp)\n finally:\n auth_providers = test_obj.get_auth_providers()\n for provider in auth_providers:\n provider.clear_auth()\n # Fernet tokens are not subsecond aware so sleep to ensure we are\n # passing the second boundary before attempting to authenticate.\n # Only sleep if a token revocation occurred as a result of role\n # overriding. This will optimize test runtime in the case where\n # ``[identity] admin_role`` == ``[patrole] rbac_test_role``.\n if not role_already_present:\n time.sleep(1)\n\n for provider in auth_providers:\n provider.set_auth()",
"def save(self):\n body = {}\n body[\"permissions\"] = dict(self.permissions)\n body[\"name\"] = self.name\n body[\"description\"] = self.description\n _, role = self._requestor.patch('/roles/' + self._id, json=body)\n self._data = role\n self.name = role[\"name\"]\n self.description = role[\"description\"]\n self.system = role[\"system\"]\n self.permissions = dict(role[\"permissions\"])",
"def role(self) -> GameRole:\n pass",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")",
"def set_user_role(request):\n id_user = request.POST.get('user_id')\n role = request.POST.get('role')\n id_projet = request.POST.get('project_id')\n # retrieves the user whose role needs to be changed\n user_to_modify = User.objects.get(pk=id_user)\n # check if user can attribute role for the project\n project = UtilsData.get_object_by_type_and_id('project', id_projet)\n if request.user.can_affect(project):\n # Verifies if the user whose role is to be changed is the administrator\n if user_to_modify.is_superuser:\n return HttpResponse(json.dumps(\"error you can't remove admin role\"),\n content_type=\"application/json\")\n else:\n # change role\n project.setRole(user_to_modify, role)\n return HttpResponse(json.dumps(\"ok\"),\n content_type=\"application/json\")",
"def create_role(self, **kwargs):\n\n role = self.role_model(**kwargs)\n return self.put(role)",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')",
"def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")",
"def user_role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_role\")",
"def assign_member(self, project_id, user_id, role_id):\n resp = {}\n path = '/projects/%s/users/%s/roles/%s' % (project_id, user_id, role_id)\n res = self.client.call(path, 'PUT', data='', \n token=self.manager.identity.token) \n \n self.logger.debug('Grant role %s to user %s on project %s' % \n (project_id, user_id, role_id))\n return True",
"def SetNodeRole(self, node, role, force=False, auto_promote=None,\n reason=None):\n query = []\n _AppendForceIf(query, force)\n _AppendIf(query, auto_promote is not None, (\"auto-promote\", auto_promote))\n _AppendReason(query, reason)\n\n return self._SendRequest(HTTP_PUT,\n (\"/%s/nodes/%s/role\" %\n (GANETI_RAPI_VERSION, node)), query, role)",
"def add_role(self, role_id: str, current_user_id=None):\n if RoleModel.is_valid_role(role_id) and not self.has_role(role_id):\n user_role = UserRoleModel(user_id=self.id, role_id=role_id, lastchange_by=current_user_id)\n self.roles.append(user_role)",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"def role(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role\")",
"async def setjoinrole(self, ctx, role):\r\n guild = ctx.message.guild\r\n role = discord.utils.get(guild.roles, name=role)\r\n functions.updatesql(server=ctx.guild.id, joinrole=role.id)\r\n await ctx.send(embed=discord.Embed(title='Sucsess!', color=discord.Colour.from_rgb(255, 0, 255)))",
"def __restoreRole(self, session):\r\n if self.__role:\r\n _logger.info(\"Switching user to role: %s\" % self.__role)\r\n\r\n session.role = self.__role\r\n self.__role = None\r\n _logger.info(\"Switched user to role: %s\" % session.role)",
"async def temprole(self, ctx: commands.Context, *, role: discord.Role = None):\n guild = ctx.guild\n role_config = self.config.guild(guild)\n role_set = await role_config.temprole()\n if role is None and role_set:\n await role_config.temprole.clear()\n return await ctx.send(\"Cleared the role being used.\")\n if role:\n if role >= ctx.author.top_role:\n return await ctx.send(\"You can't set a role equal to or higher than your own.\")\n\n if role >= ctx.guild.me.top_role:\n return await ctx.send(\n \"You can't set a role that's equal to or higher than the bot.\"\n )\n await role_config.temprole.set(role.id)\n await ctx.send(\n \"Set the role to {}.\".format(role.mention),\n allowed_mentions=discord.AllowedMentions(roles=False),\n )\n else:\n await ctx.send_help()",
"def addRole(self, role):\n self._client.addRole(role)",
"def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]",
"def assign_user_role(self, project_id, user_id, role_id):\n resp, body = self.put('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)",
"def setup_test_role(self):\n self.test_role = rand_name('role')\n resp, self.role = self.client.create_role(self.test_role)\n self.roles.append(self.role)",
"def __init__(self, role):\n self.name = role",
"def set_owner(self, role, check_mode=True):\n query = 'ALTER SUBSCRIPTION %s OWNER TO \"%s\"' % (self.name, role)\n return self.__exec_sql(query, check_mode=check_mode)",
"def set_roles_callback(self, func):\n self._roles_callback = func",
"def create_role(self, role_id, role):\n raise exception.NotImplemented() # pragma: no cover",
"async def setRoles(self, ctx: Context, person: Member, roles: Greedy[Role]):\n roles = remove_dupe_roles(roles)\n\n await person.edit(roles=roles)\n await ctx.send(f\"Setting {roles_str(person, roles)}\")",
"def role(self):\n try:\n self._role = c_char(self.lib.iperf_get_test_role(self._test)).value.decode('utf-8')\n except TypeError:\n self._role = c_char(chr(self.lib.iperf_get_test_role(self._test))).value.decode('utf-8')\n return self._role",
"def add_role(self, role, parents=[]):\r\n self._roles.setdefault(role, set())\r\n self._roles[role].update(parents)",
"def user_role(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_role\")",
"def role_arn(self) -> str:\n return pulumi.get(self, \"role_arn\")",
"async def create(self, **kwargs) -> 'role.Role':\n if not self._guild.me.guild_permissions.manage_roles:\n raise PermissionsError(\"manage_roles\")\n\n role_obb = role.Role(client=self._guild._bot,\n **(await self._guild._bot.http.create_role(self._guild.id)))\n self._roles[role_obb.id] = role_obb\n role_obb.guild_id = self._guild.id\n return await role_obb.edit(**kwargs)",
"def assign_user_role_on_project(self, project_id, user_id, role_id):\n resp, body = self.put('projects/%s/users/%s/roles/%s' %\n (project_id, user_id, role_id), None)\n self.expected_success(204, resp.status)\n return service_client.ResponseBody(resp, body)",
"def addUserRole(self, name, role):\n self._client.addUserRole(name, role)",
"def updateRole(self, role_id, title, description):\n self._roles[role_id].update({'title': title,\n 'description': description})",
"async def roles(self, ctx, *, role: Fuzzy[Selfrole] = None):\n\n if role:\n await self._toggle_role(ctx, role)\n else:\n await self._list_all_roles(ctx)",
"def role(name,\n description,\n privileges,\n base_roles):\n script_name = 'setup_role'\n script_data = nexus_groovy.setup_role\n\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': '\"{0}\" script run for role: {1}'.format(script_name, name)}\n\n script_args = {'id': name,\n 'name': name,\n 'description': description,\n 'privileges': privileges,\n 'roles': base_roles}\n\n results = _script_processor(script_name, script_data, script_args, ret)\n\n return results",
"def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")",
"def role_arn(self) -> Optional[str]:\n return pulumi.get(self, \"role_arn\")",
"def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")",
"async def command_assign_role(self, context, role: str):\n try:\n await context.author.add_roles(discord.utils.get(\n context.guild.roles, name=role))\n await context.message.add_reaction('👍')\n except Exception as e:\n await context.message.add_reaction('👎')\n await context.send('Role could not be assigned')\n print(f'Errored in command_assign_role.', e)",
"def role_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_id\")",
"def patch(self, username, role):\n try:\n UserService.add_role_to_user(token_auth.current_user(), username, role)\n return {\"Success\": \"Role Added\"}, 200\n except UserServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403",
"def set_current_roles(self, roles):\n if not isinstance(roles, list):\n raise TypeError()\n\n self._current_roles = roles",
"def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover",
"def _get_role(self):\n return self.__role",
"def update(self, username, password, rol, **kwargs):\n\n self.usuario.groups.set([rol])\n if username != self.usuario.username:\n self.usuario.username = username\n \n if password:\n self.usuario.set_password(password)\n \n self.usuario.save()\n\n for field, value in kwargs.items():\n setattr(self, field, value)\n\n self.save()",
"def role_arn(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_arn\")",
"def add_role(self, role):\n if role.name not in [r.name for r in self.roles]:\n return db[self.colNam].find_and_modify(query=dict(_id=self.id), update={'$push': {'roles': role.to_python()}})",
"def set_permissions(self, role):\n if role == User.ROLE_ADMIN:\n for perm in permissions.admin_permissions():\n self.user_permissions.add(perm)\n elif role == User.ROLE_MANAGER:\n for perm in permissions.manager_permissions():\n self.user_permissions.add(perm)\n elif role == User.ROLE_SUB_MANAGER:\n for perm in permissions.sub_manager_permissions():\n self.user_permissions.add(perm)\n else:\n for perm in permissions.user_permissions():\n self.user_permissions.add(perm)",
"def iam_roles(self, iam_roles):\n\n self._iam_roles = iam_roles",
"def author_role(self,author,role=None):\n\n rowEle = self._get_author_row(author)\n roleEle = self.find_element(self.locators['role'],rowEle)\n\n #FIXME: shenanigans begin\n roleid = roleEle.get_attribute('id')\n key = \"roleid-%s\" % (roleid)\n self.locators[key] = \"css=#%s\" % (roleid)\n obj = Select(self,{'base':key})\n obj.detach_from_owner()\n #FIXME: shenanigans end\n\n oldrole = obj.selected()\n if role:\n obj.value = role\n # click the \"save changes\" button\n self.submit.click()\n del obj\n del self.locators[key]\n return oldrole",
"def role_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_id\")",
"def set_is_staff(self, role):\n self.is_staff = (role != User.ROLE_USER)",
"def create_role(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_role\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_role`\")\n\n resource_path = '/oapi/v1/roles'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Role',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def role(self):\n _DEPRECATION_ERROR_ATTRIBUTE(\n self, \"role\", \"Use attribute 'construct_type' instead\"\n ) # pragma: no cover",
"def role_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_arn\")"
] |
[
"0.7486664",
"0.7334197",
"0.712406",
"0.7118152",
"0.7118152",
"0.66809034",
"0.65651333",
"0.65358007",
"0.6365971",
"0.63436615",
"0.63090795",
"0.6153752",
"0.6122553",
"0.6036801",
"0.60276306",
"0.5917643",
"0.5917643",
"0.5917643",
"0.5857858",
"0.5825336",
"0.58118975",
"0.58086795",
"0.58086646",
"0.5786089",
"0.5759325",
"0.5759325",
"0.5759325",
"0.56841975",
"0.5683853",
"0.5683629",
"0.56736034",
"0.5576112",
"0.555212",
"0.55476046",
"0.55435544",
"0.5542378",
"0.54986167",
"0.5459487",
"0.54472727",
"0.5431123",
"0.54216856",
"0.5412128",
"0.5402641",
"0.53884375",
"0.5377366",
"0.5372322",
"0.53645134",
"0.5356368",
"0.53503644",
"0.5335175",
"0.53347135",
"0.53347135",
"0.5328578",
"0.53173786",
"0.5292078",
"0.52906114",
"0.52906114",
"0.52906114",
"0.5289574",
"0.5269944",
"0.52146417",
"0.5202217",
"0.51996946",
"0.5197497",
"0.5186714",
"0.5179213",
"0.5173628",
"0.51692945",
"0.5159142",
"0.5145278",
"0.5143737",
"0.5141381",
"0.51359916",
"0.51119953",
"0.5106521",
"0.51029193",
"0.50991696",
"0.5094871",
"0.5090424",
"0.5085645",
"0.5061731",
"0.5061731",
"0.50560576",
"0.5054662",
"0.5046748",
"0.5044087",
"0.50421727",
"0.50391024",
"0.5037464",
"0.50162005",
"0.50125945",
"0.5000614",
"0.50001544",
"0.49894276",
"0.4987239",
"0.49861023",
"0.49651235",
"0.4952669",
"0.49495378",
"0.49329874"
] |
0.6081036
|
13
|
Gets the bot_type of this UserBase.
|
def bot_type(self):
    return self._bot_type
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def chat_type(self) -> str:\n return self.user.chat_type.name",
"def lobby_type(self):\n return self._get(\"lobby_type\")",
"def get_type(self) -> str:\n return Tables.USER.name",
"def user_type(self):\n if \"userType\" in self._prop_dict:\n return self._prop_dict[\"userType\"]\n else:\n return None",
"def user_type(self):\n if \"userType\" in self._prop_dict:\n return self._prop_dict[\"userType\"]\n else:\n return None",
"def get_type(self):\n return self.type",
"def get_type(self):\n return self.type",
"def get_type(self):\n return self._type",
"def get_type(self):\n return self._type",
"def bot(self):\n return self._bot",
"def get_type(self):\n return self._type_obj",
"def get_type(self) -> str:\n return self.type",
"def get_type(self):\n return self._TYPE",
"def user_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_type\")",
"def user_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_type\")",
"def user_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_type\")",
"def bot_type(self, bot_type):\n\n self._bot_type = bot_type",
"def get_entity_type(self):\n return self.entity_type",
"def getType(self):\n return self.type_",
"def get_session_type(self) -> Type[ba.Session]:\n return self._sessiontype",
"def getType(self):\n return self.type",
"def get_type(self, ):\n return self.attrs.get(self.AttributeNames.TYPE, None)",
"def get_hashtype(self):\n return self.__hashtype",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self._type",
"def type(self):\n return self.__type",
"def entity_type(self):\n return self._entity_type",
"def stor_type(self):\n type = self.type\n if isinstance(type, Enum): return type.base\n return type",
"def GetType(self):\r\n\r\n return self._type",
"def type(self):\n return self._getValue('type')",
"def entity_type(self) -> str:\n return self._entity_type",
"def getType(self):\n return self.tipo",
"def type(self) -> str:\n\n return self._type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def _get_type(self):\n return self.__type",
"def type(self) -> str:\n return self._type",
"def type(self) -> str:\n return self._type",
"def type(self) -> str:\n return self._type",
"def user_cls(self):\n return self.get_entity_cls('user')",
"def get_type ( self, object ):\n return self.type",
"def get_type ( self, object ):\n return self.type",
"def model_type(self):\n return self._model_type",
"def type(self) -> str:\n return self._config.get('type')",
"def getType(self):\n return self._type",
"def type(self) -> str:\n return self.type_",
"def account_type(self) -> str:\n return pulumi.get(self, \"account_type\")",
"def get_type (self):\n return self._stype",
"def type(self):\r\n return self.__type",
"def get_class(self):\n return self.meta_model.get_class()",
"def type(self):\n return self.get(\"type\")",
"def type(self):\n return self.settings[\"type\"]",
"def getType(self,):\n\t\treturn self.type;",
"def token_type(self) -> str:\n return self._token_type",
"def token_type(self) -> str:\n return self._token_type",
"def get_robot_type(robot_name):\n attr = 'robotType'\n path = '{}.{}'.format(get_target_ctrl_path(robot_name), attr)\n return get_attribute_value(path)"
] |
[
"0.64627266",
"0.6358613",
"0.614378",
"0.6125625",
"0.6125625",
"0.59903854",
"0.59903854",
"0.5968016",
"0.5968016",
"0.59540266",
"0.5942109",
"0.58541214",
"0.585159",
"0.58268684",
"0.58268684",
"0.58098865",
"0.57802206",
"0.5750634",
"0.5730113",
"0.56939965",
"0.5684667",
"0.5662987",
"0.5648909",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.56009966",
"0.5597854",
"0.558358",
"0.5575082",
"0.55703056",
"0.55661476",
"0.5559715",
"0.55529064",
"0.55463827",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5534749",
"0.5523015",
"0.5523015",
"0.5523015",
"0.5519813",
"0.551556",
"0.551556",
"0.547366",
"0.54713166",
"0.5466538",
"0.54626316",
"0.54622114",
"0.5457009",
"0.54496545",
"0.54431444",
"0.54414445",
"0.5435195",
"0.5418697",
"0.5400703",
"0.5400703",
"0.53953296"
] |
0.83526576
|
0
|
Sets the bot_type of this UserBase.
|
def bot_type(self, bot_type):
    self._bot_type = bot_type
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bot_type(self):\n return self._bot_type",
"def set_type(self, rtype=ALL_USERS):\r\n self.type = rtype",
"def set_type(self, type):\n self.type = type",
"def set_type(self, type):\n self.type = type",
"def is_bot(self, is_bot):\n\n self._is_bot = is_bot",
"def set_type(self, type_balle):\n self.type_balle = type_balle",
"def set_type(self, type):\n self._type = type",
"def set_as_type_user(self):\n self.type = MessageTypes.USER",
"def set_auth_type(self, auth_type):\n pass",
"def entity_type(self, entity_type: str):\n\n self._entity_type = entity_type",
"def engine_type(self, engine_type):\n\n self._engine_type = engine_type",
"def owner_type(self, owner_type):\n\n self._owner_type = owner_type",
"def entity_type(self, entity_type):\n\n self._entity_type = entity_type",
"def type(self, type: str):\n\n self._type = type",
"def account_type(self, account_type):\n\n self._account_type = account_type",
"def account_type(self, account_type):\n\n self._account_type = account_type",
"def account_type(self, account_type):\n\n self._account_type = account_type",
"def account_type(self, account_type):\n\n self._account_type = account_type",
"def entity_type(self, entity_type):\n self._entity_type = entity_type",
"def type(self, type):\n self._type = type",
"def type(self, type):\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def type(self, type):\n\n self._type = type",
"def bottom_type(self, bottom_type):\n\n self._bottom_type = bottom_type",
"def auth_token_validator_type(self, auth_token_validator_type):\n\n self._auth_token_validator_type = auth_token_validator_type",
"def setType(self, type):\n\t\tif not self.Loaded:\n\t\t\tself.type = type\n\t\t\tself.loader = NetLoader.getNetwork(type)\n\t\t\tself.isTypeSet = True",
"def set_filetype(self, filetype, bufnr=None):\n if bufnr:\n self._vim.command(str(bufnr) + 'bufdo set filetype=' + filetype)\n else:\n self._vim.command('set filetype=' + filetype)",
"def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\")\n\n self._type = type",
"def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\")\n\n self._type = type",
"async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username",
"def set_entity_owner_account_type(self, username, account_type):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_SET_ENTITY_OWNER_ACCOUNT_TYPE + ':' + username + '|' + account_type)",
"def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def set_type(self, value):\n self._set_one_attribute(self.AttributeNames.TYPE, value)\n return self",
"def set_type(self, _new_type):\n # Check to see if type is changing\n if _new_type == self._type:\n return\n # Move from current boid set to boid set for new type\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[self._grid][_new_type].add(self)\n # Update type\n self._type = _new_type",
"def set_execution_type(self, type):\n self.execution_type = type",
"def SetType(self, ct_type):\r\n\r\n self._type = ct_type",
"def set_chatbot(self, chatbot):\n super(MultiLogicAdapter, self).set_chatbot(chatbot)\n\n for adapter in self.adapters:\n adapter.set_chatbot(chatbot)",
"def set_type(self, type, asset=None):\n self._set_property('pc:type', type, asset)",
"def object_type(self, object_type):\n\n self._object_type = object_type",
"def object_type(self, object_type):\n\n self._object_type = object_type",
"def object_type(self, object_type):\n\n self._object_type = object_type",
"def object_type(self, object_type):\n\n self._object_type = object_type",
"def object_type(self, object_type):\n\n self._object_type = object_type",
"def type(self, type):\n allowed_values = [\"None\", \"File\", \"FileManagerFile\", \"BusOb\", \"History\", \"Other\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\" # noqa: E501\n .format(type, allowed_values)\n )\n\n self._type = type",
"def type(self, type):\n if type is None:\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n allowed_values = [\"classic\", \"trigger\"] # noqa: E501\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\" # noqa: E501\n .format(type, allowed_values)\n )\n\n self._type = type",
"def basetype_setup(self):\n # the text encoding to use.\n self.db.encoding = \"utf-8\"\n # A basic security setup\n lockstring = \"examine:perm(Wizards);edit:perm(Wizards);delete:perm(Wizards);boot:perm(Wizards);msg:false()\"\n self.locks.add(lockstring)\n # set the basics of being a bot\n self.cmdset.add_default(BotCmdSet)\n script_key = \"%s\" % self.key\n self.scripts.add(BotStarter, key=script_key)\n self.is_bot = True",
"def add_bot(self, bot):\n self.add_user(bot)",
"def chat_type(self) -> str:\n return self.user.chat_type.name",
"def type(self, type):\n allowed_values = [\"CUSTOM_AUTHENTICATION\"]\n if not value_allowed_none_or_none_sentinel(type, allowed_values):\n type = 'UNKNOWN_ENUM_VALUE'\n self._type = type",
"def set_as_type_agent(self):\n self.type = MessageTypes.AGENT",
"def wall_type(self, wall_type):\n\n self._wall_type = wall_type",
"def type(self, type):\n allowed_values = [\"I\", \"O\", \"T\"]\n if type not in allowed_values:\n raise ValueError(\n \"Invalid value for `type`, must be one of {0}\"\n .format(allowed_values)\n )\n self._type = type",
"async def _forcesettype(self, ctx, *args):\n if len(args) < 2:\n await ctx.send(\"Include both a name and a type!\")\n return\n\n god = database.getGodName(args[0], ctx.guild.id)\n if god:\n godtypes = []\n for godTypeSet in botutils.godtypes:\n godtypes.append(godTypeSet[0])\n\n if args[1].upper() in godtypes:\n database.setType(god.ID, args[1].upper())\n await ctx.send(\"Set your God's type successfully!\")\n else:\n types_string = \"\"\n i = 1\n for godtype in godtypes:\n if i == 1:\n types_string = godtype\n else:\n types_string = types_string + \", \" + godtype\n i += 1\n await ctx.send(\"Please choose between these types: `\" + types_string + \"`!\")",
"def setType(self,newtype):\n\t\tself.type = newtype;",
"def type_inheritance(self, type_inheritance):\n\n self._type_inheritance = type_inheritance",
"def execution_type(self, execution_type):\n self._execution_type = execution_type",
"def setHgType(self, hgTypeToSet):\n self.hgType = hgTypeToSet",
"def _init_bot(bot_type, game, player_id):\n rng = np.random.RandomState(FLAGS.seed)\n if bot_type == \"mcts\":\n evaluator = mcts.RandomRolloutEvaluator(FLAGS.rollout_count, rng)\n return mcts.MCTSBot(\n game,\n FLAGS.uct_c,\n FLAGS.max_simulations,\n evaluator,\n random_state=rng,\n solve=FLAGS.solve,\n verbose=FLAGS.verbose)\n if bot_type == \"az\":\n model = az_model.Model.from_checkpoint(FLAGS.az_path)\n evaluator = az_evaluator.AlphaZeroEvaluator(game, model)\n return mcts.MCTSBot(\n game,\n FLAGS.uct_c,\n FLAGS.max_simulations,\n evaluator,\n random_state=rng,\n child_selection_fn=mcts.SearchNode.puct_value,\n solve=FLAGS.solve,\n verbose=FLAGS.verbose)\n if bot_type == \"random\":\n return uniform_random.UniformRandomBot(player_id, rng)\n if bot_type == \"human\":\n return human.HumanBot()\n if bot_type == \"gtp\":\n bot = gtp.GTPBot(game, FLAGS.gtp_path)\n for cmd in FLAGS.gtp_cmd:\n bot.gtp_cmd(cmd)\n return bot\n raise ValueError(\"Invalid bot type: %s\" % bot_type)",
"def type(self, type):\n if type is not None and len(type) < 1:\n raise ValueError(\"Invalid value for `type`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type(self, type):\n if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501\n raise ValueError(\"Invalid value for `type`, must not be `None`\") # noqa: E501\n\n self._type = type",
"def type_text(self, type_text):\n\n self._type_text = type_text",
"def _type_validator(self, type=None):\n if type not in ['agents', 'users', 'groups']:\n type = 'users'\n return type",
"def attr_type(self, attr_type):\n\n self._attr_type = attr_type",
"def set_type(self, val):\n if not contain_in_list_equal(val, PARAM_TYPES):\n raise ArgumentError(\"[WARNING] `type`, should be \" + \", \".join(PARAM_TYPES))\n self._type = val\n pass",
"def room_type(self, room_type):\n if room_type is None:\n raise ValueError(\"Invalid value for `room_type`, must not be `None`\")\n\n self._room_type = room_type",
"def app_type(self, app_type):\n\n self._app_type = app_type",
"def membership_type(self, membership_type):\n\n self._membership_type = membership_type",
"def set_builder_bot(self, builder_bot): \n self.builder_bot = builder_bot # pragma: no cover",
"def committee_type(self, committee_type):\n\n self._committee_type = committee_type",
"def set_typ(self, refobj, typ):\n try:\n enum = JB_ReftrackNode.types.index(typ)\n except ValueError:\n raise ValueError(\"The given type %s could not be found in available types: %\" % (typ, JB_ReftrackNode.types))\n cmds.setAttr(\"%s.type\" % refobj, enum)",
"def token_type(self, token_type):\n if self.local_vars_configuration.client_side_validation and token_type is None: # noqa: E501\n raise ValueError(\"Invalid value for `token_type`, must not be `None`\") # noqa: E501\n\n self._token_type = token_type"
] |
[
"0.6873118",
"0.60826707",
"0.5980632",
"0.5980632",
"0.5958783",
"0.59285235",
"0.58882517",
"0.56953204",
"0.56326497",
"0.5610567",
"0.55728096",
"0.5503789",
"0.5461631",
"0.54612577",
"0.5455133",
"0.5455133",
"0.5455133",
"0.5455133",
"0.5442875",
"0.5397702",
"0.5397702",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.53877443",
"0.5374553",
"0.53510666",
"0.53215617",
"0.52407557",
"0.52344805",
"0.52344805",
"0.5203599",
"0.52023035",
"0.5187343",
"0.5187343",
"0.5187343",
"0.51859856",
"0.51833117",
"0.5164366",
"0.5157474",
"0.5149673",
"0.5137561",
"0.51171327",
"0.51171327",
"0.51171327",
"0.51171327",
"0.51171327",
"0.5112438",
"0.51093525",
"0.5106083",
"0.50966686",
"0.50961393",
"0.5094157",
"0.508937",
"0.50809205",
"0.50784427",
"0.5077341",
"0.50552946",
"0.5049779",
"0.5043049",
"0.5038352",
"0.5034711",
"0.50342953",
"0.5021851",
"0.5021851",
"0.5021851",
"0.5021851",
"0.5010855",
"0.5001793",
"0.4999595",
"0.49746075",
"0.4954014",
"0.4950507",
"0.4945108",
"0.49233595",
"0.4914216",
"0.48890275",
"0.488861"
] |
0.8411738
|
0
|
Gets the user_id of this UserBase. The unique ID of the user.
|
def user_id(self):
return self._user_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_user_id(self):\n return self.id_user",
"def user_id(self) -> str:\n return self._user_id",
"def user_id(self) -> str:\n return self._user_id",
"def get_id(self) -> int:\n return self.user_id",
"def get_id(self):\n return self.user_id",
"def user_id(self):\n # type: () -> string_types\n return self._user_id",
"def get_user_id(self):\n raise NotImplementedError",
"def user_id(self):\n return self.status.user[\"id\"]",
"def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self):\n return lamin_user_settings().id",
"def id(self) -> int:\n return self.user.id",
"def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")",
"def getUserID(self):\n\t\treturn self.UserID",
"def get_id(self):\n return self.uid",
"def user_id(self):\n return text_type(hash(self.username))",
"def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")",
"def user_huid(self) -> Optional[UUID]:\n return self.user.user_huid",
"def get_userid(self):\n user_id = \"\"\n if self.is_valid():\n user_id = self.__httprequest.session[\"lti_user_id\"]\n return user_id",
"def user_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_id\")",
"def user_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_id\")",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self): \n\t\treturn (self.user_id)",
"def unique_id(self):\n return self._uid",
"def unique_id(self):\n return self._uid",
"def unique_id(self):\n return self._uid",
"def get_userid(self):\n return util.kbase_env.user",
"def get_user_id(self, user):\n\n found_user = self.search(user)\n\n if found_user:\n return found_user[\"data\"][0][\"id\"]\n else:\n raise UserNotFound(\"User \" + user + \" not found.\")",
"def user_id(self) -> str:\n return self.app_config()[\"metadata.user.id\"]",
"def context_user_id(self) -> str | None:\n return bytes_to_uuid_hex_or_none(self.context_user_id_bin)",
"def get_id(self):\r\n return self.username",
"def getId(self):\n return self.getUserName()",
"def getId(self):\n return self.getUserName()",
"def custom_user_id(self):\n # type: () -> string_types\n return self._custom_user_id",
"def user_id(self):\n return json_loads(self.user_json).get('id')",
"def uid(self):\n\n return self._uid",
"def get_current_user(self):\n\n if self._user_id:\n return self._user_id\n endpoint = \"/me\"\n response = self._send(endpoint, \"GET\")\n user_id = response.json()[\"id\"]\n self._user_id = user_id\n return user_id",
"def get_user(self):\n return str(self.request.user.id)",
"def unique_id(self) -> str:\n return self._uid",
"def uid (self):\n return self.__uid",
"def userid(self):\n mtool = getToolByName(self.context, 'portal_membership')\n return mtool.getAuthenticatedMember().getId()",
"def get_uid(self):\n value = unicode(self.id) + self.password + app.config['SECRET_KEY']\n if self.last_login:\n value += self.last_login.strftime('%Y-%m-%d %H:%M:%S')\n return hashlib.sha224(value).hexdigest()[::2]",
"def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None",
"def get_user_id():\n return os.getuid()",
"def user_id_str(self):\n return str(self.status.user['id'])",
"def get_user_id(self, details, response):\n return response['uid']",
"def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None",
"def get_user_primary_key(self, request):\r\n try:\r\n return request.user.pk\r\n except AttributeError:\r\n return ''",
"def userID(self):\r\n return self._userID",
"def userID(self):\r\n return self._userID",
"def unique_id(self):\n return self._id",
"def unique_id(self):\n return self._id",
"def id(self) -> UID:\n return self._id",
"def id(self) -> UID:\n return self._id",
"def _get_uid(self, user):\n if not isinstance(user, int):\n user = pwd.getpwnam(user).pw_uid\n return user",
"def get_user_id():\n csc_name = get_user_csc_name()\n if csc_name:\n return csc_name\n haka_id = get_user_haka_identifier()\n if haka_id:\n return haka_id\n return None",
"def get_current_user_id():\n user = get_current_user()\n return user.pk if user and user.is_authenticated else None",
"def get_user_id(self, details, response):\n return details[\"user_id\"]",
"def get_id(self):\n\n\t\treturn self.__id",
"def get_id(self):\n return self.__id",
"def get_id(self):\n return self.__id",
"def __getNewUserID(self):\n return db_main.getHandle().seqNextVal(\"users_user_id_seq\")",
"def get_user_id():\n user_id = session.get(\"user_id\")\n return user_id if user_id else None",
"def uid(self) -> str:\n return self._uid",
"def unique_id(self):\n return self._uuid",
"def unique_id(self):\n return self._uuid",
"def get_user_id(sdk, username):\n users = sdk.users.get_by_username(username)[\"users\"]\n if not users:\n raise UserDoesNotExistError(username)\n return users[0][\"userUid\"]",
"def users_user_id_get(userId): # noqa: E501\n base.check_session()\n return _cleanuser(_finduser(userId))",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self.id",
"def get_id(self):\n return self._id",
"def get_id(self):\n return self._id",
"def get_id(self):\n return self._id",
"def get_id(self):\n return self._id",
"def get_id(self):\n return self._id"
] |
[
"0.833126",
"0.8074868",
"0.8074868",
"0.79576075",
"0.7943644",
"0.7802189",
"0.77085686",
"0.75711966",
"0.7557508",
"0.74490136",
"0.74490136",
"0.7431941",
"0.7417376",
"0.7399928",
"0.73023885",
"0.7278637",
"0.7249883",
"0.7241275",
"0.71973765",
"0.71973765",
"0.71973765",
"0.714641",
"0.71178436",
"0.7114861",
"0.7114861",
"0.70968974",
"0.70968974",
"0.70968974",
"0.70795053",
"0.7037658",
"0.7037658",
"0.7037658",
"0.70342773",
"0.70208627",
"0.6996466",
"0.6898693",
"0.68640554",
"0.6832465",
"0.6832465",
"0.6817788",
"0.6781457",
"0.6753582",
"0.6753105",
"0.67259413",
"0.67111254",
"0.6695467",
"0.669464",
"0.6644087",
"0.66266143",
"0.6614194",
"0.6598084",
"0.6579749",
"0.6538843",
"0.6532284",
"0.6521869",
"0.6521869",
"0.6518623",
"0.6518623",
"0.65097904",
"0.65097904",
"0.6501146",
"0.64959985",
"0.6487367",
"0.64849716",
"0.6484769",
"0.64614433",
"0.64614433",
"0.64602244",
"0.6452047",
"0.64412767",
"0.64166456",
"0.64166456",
"0.64165187",
"0.6360744",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6358001",
"0.6357223",
"0.6357223",
"0.6357223",
"0.6357223",
"0.6357223"
] |
0.81448174
|
5
|
Sets the user_id of this UserBase. The unique ID of the user.
|
def user_id(self, user_id):
self._user_id = user_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def id_user(self, id_user):\n\n self._id_user = id_user",
"def user_id(self, user_id):\n if user_id is None:\n raise ValueError(\"Invalid value for `user_id`, must not be `None`\") # noqa: E501\n\n self._user_id = user_id",
"def user_id(self, user_id):\n if user_id is None:\n raise ValueError(\"Invalid value for `user_id`, must not be `None`\") # noqa: E501\n\n self._user_id = user_id",
"def user_id(self, user_id: str):\n if user_id is None:\n raise ValueError(\"Invalid value for `user_id`, must not be `None`\") # noqa: E501\n\n self._user_id = user_id",
"def user_id(self, user_id: str):\n if user_id is None:\n raise ValueError(\n \"Invalid value for `user_id`, must not be `None`\"\n ) # noqa: E501\n\n self._user_id = user_id",
"def user(self, user):\n self.user_id = user.get_id()",
"def user_id(self, user_id):\n # type: (string_types) -> None\n\n if user_id is not None:\n if not isinstance(user_id, string_types):\n raise TypeError(\"Invalid type for `user_id`, type has to be `string_types`\")\n\n self._user_id = user_id",
"def set_user_id(uid):\n local.user_id = uid",
"def set_user(self, user: User):\n self.__user = user",
"def set_user(self, user):\r\n self.user = user",
"def service_user_id(self, service_user_id):\n\n self._service_user_id = service_user_id",
"def set_userId(self, userId):\n self.authentication.userId = userId",
"def userid(self, userid):\n\n self._userid = userid",
"def set_user(self, user):\n self._user = user",
"def security_user_id(self, security_user_id):\n\n self._security_user_id = security_user_id",
"def security_user_id(self, security_user_id):\n\n self._security_user_id = security_user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def set_user(self, user_model):\n\n self.user_model = user_model\n return self",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def set_su_user_id(self, su_user_id):\n su_user = self.available_users().get(id=su_user_id)\n self.set_su_user(su_user)",
"def borrow_user_id(self, borrow_user_id):\n\n self._borrow_user_id = borrow_user_id",
"def get_user_id(self):\n return self.id_user",
"def user_id(self) -> str:\n return self._user_id",
"def user_id(self) -> str:\n return self._user_id",
"def addUserId(self, user_id):\n self.__register_user_ids.add(user_id)",
"def reinitUser(self, id : int):\n id = self.validateID(id)\n # ensure the ID exists in the database\n if not self.userIDExists(id):\n raise KeyError(\"user not found: \" + str(id))\n # Reset the user\n self.users[id].resetUser()",
"def put(self, user_id):\n data = request.json\n return update_user(data, user_id)",
"def user_id(self, user_id):\n warnings.warn(\"Runtime.user_id is deprecated\", UserIdDeprecationWarning, stacklevel=2)\n self._deprecated_per_instance_user_id = user_id",
"def _save_user(self, user):\n self.firebase.patch(f'/{self.USERS_KEY}', {str(user.id): user.username})",
"def user(self, user_id):\r\n return User(self, user_id)",
"def put(self, user_id):\r\n return update_user(request, user_id)",
"def user(self, user_id=None):\n if user_id is None:\n return users.CurrentUser(self)\n\n return users.User(self, user_id)",
"def user(self, user_id=None):\r\n if user_id is None:\r\n return users.CurrentUser(self)\r\n\r\n return users.User(self, user_id)",
"def __init__(self, user_id):\n self.id = user_id",
"def set_id(self, id):\n self.__id = id",
"def uid(self, uid):\n\n self._uid = uid",
"def uid(self, uid):\n\n self._uid = uid",
"def uid(self, uid):\n\n self._uid = uid",
"def uid(self, uid):\n\n self._uid = uid",
"def register_user_telegram_id(self, user_id):\n try:\n self.db_handler.set_user_telegram_id(user_id, self.get_config_value('NEW_RATE'))\n self.logger.write_to_log('user telegram id added to db', user_id)\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def set_as_walker(self, user_id):\n user = self.user_repository.read(user_id)\n user_dict = asdict(user)\n user_dict[\"override_id\"] = user_dict[\"id\"]\n del user_dict[\"id\"]\n user_dict[\"is_available\"] = False\n self.create(user_dict)",
"def user_id(self):\n return lamin_user_settings().id",
"def set_user_attribute(self, key, val):\n self._user_attributes[key] = val",
"def set_su_user(self, su_user):\n self.request.session[AUTH_SESSION_KEY] = su_user.id\n self.request.session[AUTH_HASH_SESSION_KEY] = su_user.get_session_auth_hash()\n if su_user.id != self.auth_user.id:\n self.request.session[\"su_auth_user_id\"] = self.auth_user.id\n else:\n self.request.session.pop(\"su_auth_user_id\", None)\n self.request.session.save()\n self.request.user = su_user",
"def uid(self, value):\n self._uid = value",
"def custom_user_id(self, custom_user_id):\n # type: (string_types) -> None\n\n if custom_user_id is not None:\n if not isinstance(custom_user_id, string_types):\n raise TypeError(\"Invalid type for `custom_user_id`, type has to be `string_types`\")\n\n self._custom_user_id = custom_user_id",
"def set_username(self, user_id: int, username: str):\n user: User = UserService.get_user_by_id(self, user_id)\n if user is not None and username is not None:\n if user.username is None:\n user.username = username\n try:\n user.save()\n return model_to_dict(user)\n except BaseException:\n raise ValueError(HTTPStatus.CONFLICT,\n 'Username already exists')\n else:\n raise ValueError(HTTPStatus.NOT_MODIFIED,\n 'Username can only be set once')\n else:\n raise ValueError(HTTPStatus.BAD_REQUEST, 'Username is required')",
"def user_id(self):\n # type: () -> string_types\n return self._user_id",
"def user_id(self):\n return self.status.user[\"id\"]",
"def set_id(self, id_):\n\n self.id_ = id_",
"def update_user(self, user_id, **kwargs):\n user = self.get(user_id, raise_error=True)\n if 'display_name' in kwargs:\n user.display_name = kwargs['display_name']\n if 'email' in kwargs:\n user.email = kwargs['email']\n if 'verified' in kwargs:\n user.verified = kwargs['verified']\n self.session.add(user)",
"def user(self, user_id):\r\n return users.User(self, user_id)",
"def user(self, user_id):\r\n return users.User(self, user_id)",
"async def add_user(self, user_id) -> None:\n # await self.conn.execute(\n # \"INSERT INTO tg_users(userid) VALUES $1 ON CONFLICT DO NOTHING\",\n # user_id,\n # )\n return",
"def add_user(self, user_id):\n user_doc = {\n 'type': 'user',\n 'name': user_id\n }\n return self.add_doc_if_not_exists(user_doc, 'name')",
"def set_state_of_user(user_id, state):\n if not ValidStates.has_value(state):\n raise RuntimeError(\"Invalid event alteration state reached\")\n UserEventAlterationMachine.state_dict[user_id] = state",
"def getUser(self, id):\n if not isinstance(id, int):\n # Must be a string. Get the UserId first.\n id = self.getUserId(id)\n u = self.users[id]\n while isinstance(u, int):\n id = u\n u = self.users[id]\n u.id = id\n return u",
"def set_id(self, id):\n self.id = id\n print(\"self id = \" + str(self.id))",
"async def setuserinfo(self, ctx, server: str, user_uuid: str, user_intid: str):\n self.settings.setUserInfo(server, user_uuid, user_intid)\n await ctx.send(inline('Done'))",
"def setObjectID(self, id):\n\n self.objectID = id[0]",
"def setID(self, id):\r\n raise NotImplementedError(\"must be implemented in subclass\")",
"def set(self, **kwargs: Any) -> None: # nosec\n attributes = {}\n user_id: int = int(kwargs[\"user_id\"])\n user = self.first(id_int=user_id)\n\n for k, v in kwargs.items():\n if k in user.__attr_searchable__:\n attributes[k] = v\n\n if kwargs.get(\"email\", None):\n user.email = kwargs[\"email\"]\n elif kwargs.get(\"role\", None):\n user.role = kwargs[\"role\"]\n elif kwargs.get(\"name\", None):\n user.name = kwargs[\"name\"]\n elif kwargs.get(\"budget\", None):\n user.budget = kwargs[\"budget\"]\n elif kwargs.get(\"website\", None):\n user.website = kwargs[\"website\"]\n elif kwargs.get(\"institution\", None):\n user.institution = kwargs[\"institution\"]\n else:\n raise Exception\n\n attributes[\"__blob__\"] = _serialize(user, to_bytes=True)\n\n self.update_one(query={\"id_int\": user_id}, values=attributes)",
"def SetId(self, id):\n self.id = int(id)",
"def put_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n user = request.get_json()\r\n if user is None:\r\n abort(400, \"Not a JSON\")\r\n for key, value in user.items():\r\n if key not in ['id', 'email', 'created_at', 'updated_at']:\r\n setattr(obj, key, value)\r\n obj.save()\r\n return jsonify(obj.to_dict()), 200",
"def _update_user(cursor, user_id, user):\n # Create a tuple with user fields\n user_data = (user[User.PROPERTIES.FOLLOWERS],\n user[User.PROPERTIES.FOLLOWING],\n user[User.PROPERTIES.DESIGNS],\n user[User.PROPERTIES.COLLECTIONS],\n user[User.PROPERTIES.MAKES],\n user[User.PROPERTIES.LIKES],\n user[User.PROPERTIES.SKILL_LEVEL],\n user_id)\n\n cursor.execute(dbq.UPDATE_USER, user_data)\n logger.debug(\"user_id {} updated\".format(user_id))",
"def get_user_id(self):\n raise NotImplementedError",
"def set_id(self, id_=None):\n if id_ is None:\n self.id = id(self)\n else:\n self.id = id_",
"def user(self, user_id=None):\r\n return users.User(self, user_id)",
"def set_id(self, id):\n self.data['id'] = id",
"def get_user(self, user_id=None):\n raise NotImplementedError",
"def user(self, user_id=None):\r\n if user_id is None:\r\n return resources.CurrentUser(self)\r\n return resources.User(self, user_id)",
"def user_to_user(self, user_to_user):\n\n self._user_to_user = user_to_user",
"def setUser(self, user):\n libxml2mod.xmlURISetUser(self._o, user)",
"def _set_id(self, value):\n pass",
"def get_id(self) -> int:\n return self.user_id"
] |
[
"0.8062206",
"0.7894141",
"0.7894141",
"0.78666085",
"0.7841344",
"0.7453738",
"0.7334409",
"0.7047939",
"0.69315517",
"0.6743944",
"0.67409",
"0.67378694",
"0.67298764",
"0.65137416",
"0.6411481",
"0.6411481",
"0.6393266",
"0.6393266",
"0.6393266",
"0.6393266",
"0.6393266",
"0.6381729",
"0.6348913",
"0.6348913",
"0.6348913",
"0.6348913",
"0.6348913",
"0.6348913",
"0.6348913",
"0.6348913",
"0.6348913",
"0.634445",
"0.6235679",
"0.62291205",
"0.621169",
"0.621169",
"0.61412853",
"0.6055607",
"0.5941267",
"0.592406",
"0.58937776",
"0.58767945",
"0.58522165",
"0.5837087",
"0.5810054",
"0.58097786",
"0.5808667",
"0.57696724",
"0.57696724",
"0.57696724",
"0.57696724",
"0.57540363",
"0.5725247",
"0.5724653",
"0.5722674",
"0.5713253",
"0.5712845",
"0.5700931",
"0.568061",
"0.5670267",
"0.566371",
"0.5663295",
"0.5619414",
"0.56185126",
"0.56185126",
"0.55938107",
"0.55878246",
"0.558241",
"0.55804956",
"0.5553373",
"0.55515987",
"0.55438805",
"0.5540797",
"0.55380416",
"0.55236906",
"0.55230993",
"0.55118656",
"0.5495069",
"0.547739",
"0.54708993",
"0.54682815",
"0.54637986",
"0.54629374",
"0.54495305",
"0.54453975",
"0.54373896",
"0.54344285"
] |
0.8119237
|
10
|
Gets the bot_owner_id of this UserBase.
|
def bot_owner_id(self):
return self._bot_owner_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])",
"def owner_id(self) -> int:\n return self.proto.owner",
"def owner_id(self):\n return self._owner_id",
"def owner_id(self) -> str:\n return self.__owner_id",
"def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")",
"def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")",
"def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None",
"def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")",
"def get_owner(self, obj):\n return obj.user.username",
"def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")",
"def get_owner(self):\n return self.__owner",
"def _get_owner(self):\n if self.resource.owner is not None:\n try:\n return pwd.getpwnam(self.resource.owner).pw_uid\n except KeyError:\n raise error.InvalidUser()",
"def get_owner(self):\n return self.settings.get(\"owner\", None)",
"def getOwner(self):\n return self.__owner",
"def getOwner(self):\n return self.__owner",
"def bot_owner_id(self, bot_owner_id):\n\n self._bot_owner_id = bot_owner_id",
"def owner(self) -> str:\n return self._owner",
"def user(self):\n return self.owner.user",
"def business_owner(self):\n return self._business_owner",
"def getOwner(self):\r\n return self.owner",
"def owner(self):\n return Organization.objects.get(id=self.owner_id)",
"def owner(self):\n answer = self._call('owner')\n return answer.owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def owner(self):\n return self._owner",
"def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user",
"def get_owner_object(self):\n return None",
"def get_owner_object(self):\n return None",
"def get_owner_object(self):\n return None",
"def owner(self):\n return self.__owner",
"def get_user_id(self):\n return self.id_user",
"def GetOwnerManager(self):\r\n\r\n return self._owner_mgr",
"def user_org_id(self) -> str:\n return self._user_org_id",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def get_id(self):\n return self.username",
"def fb_id(self):\n social_auth = self.social_auth.latest('id')\n return social_auth.uid",
"def get_id(self):\n return self.user_id",
"def get_slack_token_owner():\n response = slack_client.api_call(\n \"auth.test\",\n )\n if not response.get(\"ok\", False):\n raise SlackError('Failed to get slack token owner {}'.format(response['error']))\n return response['user_id']",
"def username(self) -> str:\n return self._data['Owner']",
"def business_owner_email(self):\n return self._business_owner_email",
"def get_id(self) -> int:\n return self.user_id",
"def getOwner(owner_id):\n return Owner(Cuebot.getStub('owner').GetOwner(\n host_pb2.OwnerGetOwnerRequest(name=owner_id), timeout=Cuebot.Timeout).owner)",
"def owner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner\")",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def user_id(self):\n return self._user_id",
"def get_owner_private_messages(self):\n owner_id = self.bot_data_file[\"owners_data\"][\"ownerPrivateMessagesID\"]\n if owner_id == \"\":\n print(\"ERROR GETTING THE OWNER ID - EMPTY\")\n return \"\"\n else:\n return owner_id",
"def get_owner_object(self):\n return self",
"def user_id(self) -> str:\n return self._user_id",
"def user_id(self) -> str:\n return self._user_id",
"def get_id(self):\r\n return self.username",
"def owner(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"owner\")",
"def id(self):\n return self.settings['your_botid']",
"def user_id(self) -> str:\n return self.app_config()[\"metadata.user.id\"]",
"def user_id(self):\n return self.status.user[\"id\"]",
"def _get_user_id(self):\n auth_response = self._slack_client.api_call(\"auth.test\")\n\n if auth_response.get(\"ok\") is not None and auth_response[\"ok\"]:\n bot_id = auth_response[\"user_id\"]\n logger.info(\"Connected to slack with user id: {}\".format(bot_id))\n return bot_id\n else:\n raise PermissionError(auth_response[\"error\"])",
"def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")",
"def owner(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner\")",
"def ownerOfObject(obj):\n udb, uid = obj.getOwnerTuple()\n root = obj.getPhysicalRoot()\n udb = root.unrestrictedTraverse(udb, None)\n if udb is None:\n user = SpecialUsers.nobody\n else:\n user = udb.getUserById(uid, None)\n if user is None:\n user = SpecialUsers.nobody\n else:\n user = user.__of__(udb)\n return user",
"def owner_type(self) -> str:\n return pulumi.get(self, \"owner_type\")",
"def get_owner(self):\n owner = gdef.PSID()\n lpbOwnerDefaulted = gdef.BOOL()\n winproxy.GetSecurityDescriptorOwner(self, owner, lpbOwnerDefaulted)\n # Return None of owner is NULL\n return owner or None",
"def owner(self):\n if self.get_team():\n return self.get_team()\n return None",
"def calc_owner(self, activation):\n from django.contrib.auth import get_user_model\n\n owner = self._owner\n if callable(owner):\n owner = owner(activation)\n elif isinstance(owner, dict):\n owner = get_user_model()._default_manager.get(**owner)\n return owner",
"def get_user_id(self):\n raise NotImplementedError",
"def manage_owner(owner_id):\n\n return _get_owner_service().get_owner(owner_id)",
"def user_id(self):\n return text_type(hash(self.username))",
"def user_id(self):\n # type: () -> string_types\n return self._user_id",
"def get_owner_name(self):\n\t\treturn call_sdk_function('PrlAcl_GetOwnerName', self.handle)",
"def id(self) -> int:\n return self.user.id",
"def source_owner(self) -> str:\n return pulumi.get(self, \"source_owner\")",
"def source_owner(self) -> str:\n return pulumi.get(self, \"source_owner\")",
"def get_id(self):\n return self.uid",
"def user_huid(self) -> Optional[UUID]:\n return self.user.user_huid",
"def bot(self):\n return self._bot",
"def get_owner(self):\n return self._creatorsHeap[0][1]",
"def get_userid(self):\n return util.kbase_env.user",
"def user_id(self):\n return lamin_user_settings().id",
"def get_owner(self, property_name):\n\n property_owner = self.db.read_value(property_name, \"owner\")\n return property_owner",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs[\"pk\"])",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs[\"pk\"])",
"def unique_id(self):\n return self._uid",
"def unique_id(self):\n return self._uid",
"def unique_id(self):\n return self._uid",
"def get_current_user(self):\n\n if self._user_id:\n return self._user_id\n endpoint = \"/me\"\n response = self._send(endpoint, \"GET\")\n user_id = response.json()[\"id\"]\n self._user_id = user_id\n return user_id",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(km_user__pk=self.kwargs.get(\"pk\"))",
"def getOwnerIdFromToken(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def getId(self):\n return self.getUserName()",
"def getId(self):\n return self.getUserName()",
"def add_owner_id(data=None, **kw):\n data['owner_id'] = current_user.id",
"def get_score_owner(self) -> Address:\n return self.owner",
"def get_subscription_owner(self, request):\n return get_user_model().objects.get(\n km_user__profile__pk=self.kwargs.get(\"pk\")\n )",
"def get_user(self):\n return str(self.request.user.id)",
"def get_player_id(self):\n return self.game.get_player_id()",
"def user(self):\n return self._forced_user"
] |
[
"0.74863726",
"0.7462389",
"0.7429973",
"0.73903984",
"0.7214011",
"0.7123705",
"0.70963526",
"0.7041297",
"0.6885209",
"0.6862868",
"0.68442214",
"0.6656169",
"0.66238314",
"0.65125304",
"0.65125304",
"0.6489912",
"0.6465008",
"0.639576",
"0.636058",
"0.63590413",
"0.63192767",
"0.6281676",
"0.6201579",
"0.6201579",
"0.6201579",
"0.6201579",
"0.6201579",
"0.616952",
"0.6150902",
"0.6150902",
"0.6150902",
"0.61466384",
"0.6047676",
"0.60352814",
"0.602826",
"0.59995675",
"0.59995675",
"0.59995675",
"0.59937227",
"0.5982564",
"0.5977698",
"0.5969508",
"0.5963957",
"0.595243",
"0.5919383",
"0.5919129",
"0.59139705",
"0.5908654",
"0.5908654",
"0.5908654",
"0.5908654",
"0.5908654",
"0.5880753",
"0.58786976",
"0.58778894",
"0.58778894",
"0.58629704",
"0.58348036",
"0.58302",
"0.5828229",
"0.5785613",
"0.5779458",
"0.575684",
"0.575684",
"0.5747609",
"0.57344776",
"0.5712333",
"0.5700976",
"0.5679608",
"0.566586",
"0.56636155",
"0.5632653",
"0.563125",
"0.56311864",
"0.5620881",
"0.5613772",
"0.5613772",
"0.56130344",
"0.5548774",
"0.55332524",
"0.55271566",
"0.55211306",
"0.55082464",
"0.547731",
"0.5473829",
"0.5473829",
"0.546402",
"0.546402",
"0.546402",
"0.54551774",
"0.54459137",
"0.5442143",
"0.5432493",
"0.5432493",
"0.54186505",
"0.54092866",
"0.53262633",
"0.53161836",
"0.53129375",
"0.5310757"
] |
0.86790913
|
0
|
Sets the bot_owner_id of this UserBase.
|
def bot_owner_id(self, bot_owner_id):
self._bot_owner_id = bot_owner_id
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def owner_id(self, owner_id):\n\n self._owner_id = owner_id",
"def owner_id(self, owner_id):\n self._owner_id = owner_id",
"def bot_owner_id(self):\n return self._bot_owner_id",
"def set_owner(self, owner):\n self.__owner = owner",
"def owner_id(self, owner_id):\n if owner_id is None:\n raise ValueError(\"Invalid value for `owner_id`, must not be `None`\") # noqa: E501\n\n self._owner_id = owner_id",
"def set_owner(self, owner):\n self.settings[\"owner\"] = owner",
"def owner(self, owner: str):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def owner(self, owner):\n self._owner = owner",
"def add_owner_id(data=None, **kw):\n data['owner_id'] = current_user.id",
"def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)",
"def scope_owner(self, scope_owner):\n\n self._scope_owner = scope_owner",
"def business_owner(self, business_owner):\n\n self._business_owner = business_owner",
"def set_owner(self, owner: Optional[\"STACObject_Type\"]) -> \"Link\":\n self.owner = owner\n return self",
"def owner_id(self) -> str:\n return self.__owner_id",
"def owner(self, owner):\n if self.local_vars_configuration.client_side_validation and owner is None: # noqa: E501\n raise ValueError(\"Invalid value for `owner`, must not be `None`\") # noqa: E501\n\n self._owner = owner",
"def owner_id(self):\n return self._owner_id",
"def migrate_fix_invalid_bot_owner_values(\n apps: StateApps, schema_editor: BaseDatabaseSchemaEditor\n) -> None:\n UserProfile = apps.get_model(\"zerver\", \"UserProfile\")\n UserProfile.objects.filter(is_bot=False).exclude(bot_owner=None).update(bot_owner=None)",
"async def set_bot():\n\n self = await LOCAL.APP.get_me()\n LOCAL.bot_id = self.id\n LOCAL.bot_name = self.first_name\n LOCAL.bot_username = self.username",
"def owner_reference(self, owner_reference):\n\n self._owner_reference = owner_reference",
"def owner_id(self) -> int:\n return self.proto.owner",
"def is_owner(self, is_owner):\n\n self._is_owner = is_owner",
"def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email",
"def owner_type(self, owner_type):\n\n self._owner_type = owner_type",
"def owner(self) -> None:\n return self.bot.get_user(self.bot.config.owner_ids[0])",
"def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None",
"def save_model(self, request, obj, form, change):\n try:\n owner = form.instance.owner\n except models.Application.owner.RelatedObjectDoesNotExist:\n form.instance.owner = request.user\n\n super().save_model(request, obj, form, change)",
"def add_bot(self, bot):\n self.add_user(bot)",
"def setOwnerPassword(self,value):\n self.PDFreactorConfiguration.in1[\"ownerPassword\"] = value",
"def owner_id(self) -> str:\n return pulumi.get(self, \"owner_id\")",
"def set_owner(plugin_id, username, logger, client):\n plugin = client.plugins.set_owner(plugin_id, username)\n logger.info('Plugin `%s` is now owned by user `%s`.',\n plugin_id, plugin.get('created_by'))",
"def owner_id(self) -> Optional[str]:\n return pulumi.get(self, \"owner_id\")",
"def set_owner_allowed(self, data):\n self._owner_allowed = self._uni(data)",
"def id_user(self, id_user):\n\n self._id_user = id_user",
"def manage_owner(owner_id):\n\n return _get_owner_service().get_owner(owner_id)",
"def pre_save(self, obj):\n obj.owner = self.request.user",
"def is_bot_owner(ctx: commands.Context) -> bool:\n return ctx.author.id == int(open(\"data/metadata/owner.id.txt\", \"r\").read())",
"def _set_owner(atom_list, owner_array, atm, mol_id):\n # This could be written more simply as a recursive function, but that leads\n # to stack overflows, so I flattened it into an iterative one.\n partners = [atom_list[atm].bond_partners]\n loop_index = [0]\n atom_list[atm].marked = mol_id\n while len(partners) > 0:\n if loop_index[-1] >= len(partners[-1]):\n partners.pop()\n loop_index.pop()\n continue\n partner = partners[-1][loop_index[-1]]\n loop_index[-1] += 1\n if not partner.marked:\n owner_array.append(partner.idx)\n partner.marked = mol_id\n partners.append(partner.bond_partners)\n loop_index.append(0)\n elif partner.marked != mol_id:\n raise MoleculeError('Atom %d in multiple molecules' % partner.idx)",
"def opponent_id(self, opponent_id):\n\n self._opponent_id = opponent_id",
"def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner",
"def owner_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"owner_account_id\")",
"def link_owner_to_company(self, owner_id: str, company_id: str):\n return self.create(\n from_object=owner_id,\n to_object=company_id,\n definition=Definitions.OWNER_TO_COMPANY,\n )",
"def set_player_id(self, player_id):\n self.player_id = player_id",
"def set_id(self, player_id):\n pass",
"def get_owner(self, obj):\n return obj.user.username",
"def set_object_owner(self, bucket_name, object_name, uid, gid):\n\n return h3lib.set_object_owner(self._handle, bucket_name, object_name, uid, gid, self._user_id)",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def user_id(self, user_id):\n\n self._user_id = user_id",
"def set_userId(self, userId):\n self.authentication.userId = userId",
"def set_builder_bot(self, builder_bot): \n self.builder_bot = builder_bot # pragma: no cover",
"def bot_type(self, bot_type):\n\n self._bot_type = bot_type",
"def technical_owner(self, technical_owner):\n\n self._technical_owner = technical_owner",
"def channel_addowner(token, channel_id, u_id):\n auth_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if u_id in channel[\"owners\"]:\n raise ValueError(\"user is already an owner\")\n user = users.get(auth_u_id)\n if auth_u_id not in channel[\"owners\"] and user[\"is_admin\"] is False:\n raise AccessError(\"You do not have permission to add owners\")\n\n channels.set(channel_id, \"owners\", u_id)",
"def __set_sender_id(self, sender_id):\n if not isinstance(sender_id, int):\n raise TypeError('It has to be an integer identifier')\n if sender_id < 0:\n raise ValueError('There are not negative identifiers')\n self.__sender_id = sender_id",
"def add_bot_user(self, effective_user, bot_id):\n self.execute(TABELLE['bot_users']['insert'], (bot_id, effective_user['id'], effective_user['language_code'],))",
"def set_entity_owner_account_type(self, username, account_type):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_SET_ENTITY_OWNER_ACCOUNT_TYPE + ':' + username + '|' + account_type)",
"def player_id(self, player_id):\n\n self._player_id = player_id",
"def player_id(self, player_id):\n\n self._player_id = player_id",
"def setOid(self, value):\n return self.getDbRecord().setColumnValue(OID_COLUMN, value)",
"def setOwner(self, long_name, short_name=None):\n nChars = 3\n minChars = 2\n if long_name is not None:\n long_name = long_name.strip()\n if short_name is None:\n words = long_name.split()\n if len(long_name) <= nChars:\n short_name = long_name\n elif len(words) >= minChars:\n short_name = ''.join(map(lambda word: word[0], words))\n else:\n trans = str.maketrans(dict.fromkeys('aeiouAEIOU'))\n short_name = long_name[0] + long_name[1:].translate(trans)\n if len(short_name) < nChars:\n short_name = long_name[:nChars]\n t = mesh_pb2.ToRadio()\n if long_name is not None:\n t.set_owner.long_name = long_name\n if short_name is not None:\n short_name = short_name.strip()\n if len(short_name) > nChars:\n short_name = short_name[:nChars]\n t.set_owner.short_name = short_name\n self._sendToRadio(t)",
"def set_user_id(uid):\n local.user_id = uid",
"def set_user(self, user_model):\n\n self.user_model = user_model\n return self",
"def put(self, user_id):\n self.conn = pecan.request.db_conn\n self.conn.change_billing_owner(request.context,\n project_id=self.project_id,\n user_id=user_id)",
"def possessed_by(self, other):\r\n self.owner = other",
"def get_owner(self):\n return self.__owner",
"def owner_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"owner_account_id\")",
"def add_owner(self, *, table_uri: str, owner: str) -> None:\n user = RDSUser(rk=owner, email=owner)\n table_owner = RDSTableOwner(table_rk=table_uri, user_rk=owner)\n try:\n with self.client.create_session() as session:\n session.merge(user)\n session.merge(table_owner)\n session.commit()\n except Exception as e:\n LOGGER.exception(f'Failed to add owner {owner} for table {table_uri}')\n raise e",
"def player_b_id(self, player_b_id):\n\n self._player_b_id = player_b_id",
"def change_ownership(obj, userid):\n assert isinstance(userid, string_types)\n old_owner = obj.creators[0]\n if userid == old_owner:\n return\n #Remove Owner group from old owner\n obj.local_roles.remove(old_owner, ROLE_OWNER)\n #Add new owner\n obj.local_roles.add(userid, ROLE_OWNER)\n #Set new owner in creators attr - this will also trigger reindex catalog event so keep it last!\n obj.set_field_appstruct({'creators': (userid,)})\n return userid",
"async def slashtagset_appid(self, ctx: commands.Context, id: int = None):\n app_id = id or self.bot.user.id\n await self.config.application_id.set(app_id)\n self.application_id = app_id\n await ctx.send(f\"Application ID set to `{id}`.\")",
"def owner(self) -> str:\n return self._owner",
"def set_user(self, user):\r\n self.user = user",
"def make_logged_in_owner(self):\n response = self.client.post('', self.credentials, follow=True)",
"def save(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n if not user:\n raise ValueError(\"User not present in the model\")\n if not hasattr(self, 'owner'):\n self.owner = user\n elif not self.can_be_edited(user):\n raise ValueError(\"User can't edit the model\")\n self.last_updated_by = user\n self.last_updated_datetime = datetime.datetime.now()\n super(BaseModel, self).save(*args, **kwargs)",
"def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user",
"def __init__(self, owner):\n self._owner = owner",
"def set_user(self, user):\n self._user = user",
"def set_owner_name(self, data, **kwargs):\n try:\n git_url = GitURL.parse(data[\"git_url\"])\n except UnicodeError as e:\n raise ValidationError(\"`git_url` contains unsupported characters\") from e\n except ConfigurationError as e:\n raise ValidationError(\"Invalid `git_url`\") from e\n\n if git_url.owner is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"owner\"] = git_url.owner\n\n if git_url.name is None:\n raise ValidationError(\"Invalid `git_url`\")\n data[\"name\"] = git_url.name\n data[\"slug\"] = normalize_to_ascii(data[\"name\"])\n\n return data",
"def git_username(self, git_username):\n self._git_username = git_username",
"def user(self, user):\n self.user_id = user.get_id()",
"def git_username_user_attribute(self, git_username_user_attribute):\n self._git_username_user_attribute = git_username_user_attribute",
"def transfer_ownership(self, user):\n new_owner = get_user_model().objects.filter(is_active=True) \\\n .get(pk=user.pk)\n self.owner = new_owner"
] |
[
"0.73704034",
"0.733693",
"0.71219534",
"0.69730103",
"0.6940321",
"0.6678052",
"0.64090276",
"0.62162405",
"0.62162405",
"0.62162405",
"0.62162405",
"0.6192035",
"0.6192035",
"0.6192035",
"0.6192035",
"0.60748756",
"0.6034818",
"0.59885144",
"0.5901746",
"0.58401716",
"0.57850367",
"0.5725079",
"0.57130444",
"0.56528014",
"0.5643406",
"0.56280756",
"0.5620602",
"0.560691",
"0.5572163",
"0.5538237",
"0.5534089",
"0.5413427",
"0.5362116",
"0.52563405",
"0.52388537",
"0.52329403",
"0.52299714",
"0.52219564",
"0.5220575",
"0.5209483",
"0.5199951",
"0.5193824",
"0.5189747",
"0.51827455",
"0.51777416",
"0.5159458",
"0.51546645",
"0.5101768",
"0.50877976",
"0.5047517",
"0.5028625",
"0.50279593",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49792704",
"0.49753907",
"0.49660817",
"0.49640727",
"0.48907095",
"0.4838971",
"0.4836257",
"0.48143518",
"0.47839093",
"0.47801638",
"0.47801638",
"0.47764578",
"0.47759494",
"0.4729325",
"0.47264108",
"0.47252208",
"0.47219735",
"0.4717806",
"0.47176793",
"0.47065347",
"0.47053942",
"0.467113",
"0.4653148",
"0.46451426",
"0.46378163",
"0.4636094",
"0.46343434",
"0.46264508",
"0.4604547",
"0.46023124",
"0.4601912",
"0.46003062",
"0.45921263",
"0.4591829",
"0.45525983"
] |
0.8572632
|
0
|
Gets the is_active of this UserBase. A boolean specifying whether the user account has been deactivated.
|
def is_active(self):
return self._is_active
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active",
"def is_active_user(self):\n\n return self.is_active",
"def is_active(self):\n return self.user.is_active",
"def is_active(self):\n return self.status == ACTIVE_USER",
"def is_active(self) -> bool:\n return self.__is_active",
"def is_active(self) -> bool:\n return self._is_active",
"def is_active(self):\n return self._active",
"def is_active(self):\n return self.active",
"def is_active(self):\n return self.active",
"def is_active(self):\n return self.active",
"def is_active(self):\n return self.active",
"def is_activated(self):\n return self._activated",
"def is_active(self) -> bool:\r\n return self.active",
"def isActive(self):\n return self.data.active",
"def is_active(self):\r\n return self.active",
"def IsActive(self):\r\n\r\n return self.active",
"def active(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> bool:\n return pulumi.get(self, \"active\")",
"def is_active(self):\n return self._is_record_status(self.ACTIVE)",
"def is_active(self) -> bool:\n return self.active == \"active\"",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"active\")",
"def get_active(self):\n return self._active",
"def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active",
"def is_active(self):\n\n active = bool(\n self.__is_connected and\n self._db_connection and\n self._db_transaction and\n self._db_connection.in_transaction() and\n self._db_transaction.is_active\n )\n\n if not active and self.__is_connected:\n self._cleanup()\n\n return active",
"def active(self) -> bool:\n return self._active",
"def active(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"active\")",
"def is_active(self):\n return self.type_id in ACTIVE_STATES",
"def _checkUserInactive(username,self):\r\n active = False\r\n user = _findUser(username)\r\n \r\n if user is not None:\r\n active = user.getIsActive()\r\n \r\n return active",
"def is_active(self, is_active: bool):\n if is_active is None:\n raise ValueError(\"Invalid value for `is_active`, must not be `None`\")\n\n self._is_active = is_active",
"def users_active(self):\n return self.users(\"inactive == NO\")",
"def get_isenabled(self):\n return self.isenabled",
"def active(self):\n return self._active",
"def active(self):\n return self._active",
"def is_active(self) -> bool:\n if not self.expires_at:\n return False\n return self.expires_at > datetime.datetime.now()",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def get_is_active(self, instance):\n return instance.check_finish()",
"def user_is_activated(self, user_name):\n return not self._simultanious_log_ins and \\\n user_name in self._active_users_names",
"def active(self):\n return self.owner.active",
"def IsActive(self):\n return True",
"def active(self) -> bool:\n return self.orchestration_status == \"Active\"",
"def active(self) -> bool:\n return self.relay(\"active\")",
"def is_active(self, is_active):\n \n self._is_active = is_active",
"def IsEnabled(self):\r\n\r\n return self._enabled",
"def enabled(self):\n return self._get('enabled')",
"def is_active(self) -> bool:\n return self._stream.active",
"def is_enabled(self):\n return self.enabled",
"def is_active(self):\n return self.stream.is_active()",
"def is_active(self) -> bool:\n return not any((self.is_ancillary, self.is_removed, self.is_system))",
"def is_enabled(self):\n return self._enabled",
"def is_enabled(self):\n return self._enabled",
"def is_active(self) -> bool:",
"def is_active():\n return True",
"def is_active(self, is_active):\n if self.local_vars_configuration.client_side_validation and is_active is None: # noqa: E501\n raise ValueError(\"Invalid value for `is_active`, must not be `None`\") # noqa: E501\n\n self._is_active = is_active",
"def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False",
"def is_enabled(self):\n return self._is_enabled",
"def is_on(self):\n return self._data[\"enabled\"]",
"def is_active(self):\n return self.state != 'checkout'",
"def is_active(self):\r\n return True",
"def is_active(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-active').succeeded",
"def is_valid(self):\n return self.is_active",
"def active(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"active\")",
"def enabled(self):\n\n return self._enabled",
"def activated_on(self):\n return self._activated_on",
"def fusion_api_get_active_user(self):\n return self.loginsession.get_active_user()",
"def get_is_portal_enabled(self):\n return self.is_portal_enabled",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def account_enabled(self):\n if \"accountEnabled\" in self._prop_dict:\n return self._prop_dict[\"accountEnabled\"]\n else:\n return None",
"def Enabled(self):\n return self._get_attribute('enabled')",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled",
"def enabled(self):\n return self._enabled"
] |
[
"0.7960439",
"0.77300847",
"0.76961887",
"0.76444125",
"0.73034716",
"0.7291146",
"0.7148115",
"0.7120933",
"0.7120933",
"0.7120933",
"0.7120933",
"0.7039014",
"0.70228976",
"0.6914976",
"0.68688226",
"0.6831247",
"0.6828009",
"0.67985916",
"0.67348653",
"0.6728246",
"0.67187434",
"0.67187434",
"0.67187434",
"0.67187434",
"0.6701585",
"0.66858923",
"0.66760993",
"0.662623",
"0.66021234",
"0.6577914",
"0.6417534",
"0.6397758",
"0.6344295",
"0.634396",
"0.6340458",
"0.62813383",
"0.62813383",
"0.62247515",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.6161303",
"0.60888094",
"0.6078631",
"0.6045923",
"0.60435206",
"0.6009779",
"0.59668714",
"0.59624684",
"0.59518826",
"0.59226984",
"0.5918909",
"0.5914035",
"0.5899928",
"0.5876646",
"0.5874522",
"0.5874522",
"0.5868789",
"0.58659536",
"0.5864838",
"0.5854439",
"0.58497095",
"0.5845004",
"0.5841497",
"0.583921",
"0.58321387",
"0.58306193",
"0.5813887",
"0.5793217",
"0.5787032",
"0.5774904",
"0.57739353",
"0.5770624",
"0.5770624",
"0.57579565",
"0.57435864",
"0.57435864",
"0.57435864",
"0.57435864",
"0.57435864",
"0.57435864",
"0.57435864",
"0.57435864"
] |
0.72787446
|
8
|
Sets the is_active of this UserBase. A boolean specifying whether the user account has been deactivated.
|
def is_active(self, is_active):
self._is_active = is_active
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_active(self, is_active: bool):\n if is_active is None:\n raise ValueError(\"Invalid value for `is_active`, must not be `None`\")\n\n self._is_active = is_active",
"def is_active(self, is_active):\n \n self._is_active = is_active",
"def is_active(self, is_active):\n if self.local_vars_configuration.client_side_validation and is_active is None: # noqa: E501\n raise ValueError(\"Invalid value for `is_active`, must not be `None`\") # noqa: E501\n\n self._is_active = is_active",
"def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()",
"def toggle_active(self, user):\n user.active = not user.active\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True",
"def set_active(self, active):\n self._active = active",
"def toggle_active(self, user):\n user.active = not user.active\n return True",
"def set_active(self, active):\n self.active = active",
"def active(self, value):\n self._active = value\n # Check if this is already linked with an object in the database.\n # If it is, change the username in the user account too.\n try:\n self.userprofile.user.is_active = value\n except UserProfile.DoesNotExist:\n pass",
"def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)",
"def active(self, active):\n if active is None:\n raise ValueError(\"Invalid value for `active`, must not be `None`\") # noqa: E501\n\n self._active = active",
"def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False",
"def setActive(self, active):\n\n self._active = active",
"def activate_user(self, username):\n args = parser_activate.parse_args()\n isActive = request.json.get('isactive')\n\n query = \"\"\"UPDATE users SET isactive=%s WHERE username=%s\"\"\"\n values = isActive, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True",
"def activate_user(self, user):\n if not user.active:\n user.active = True\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return",
"def active(self, activate):\n self.is_active = activate",
"def SetActive(self, b):\r\n\r\n self.active = b",
"def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active",
"def active(self, active):\n\n self._active = active",
"def active(self, active):\n\n self._active = active",
"def active(self, active):\n\n self._active = active",
"def active(self, active):\n\n self._active = active",
"def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active",
"def set_active(self):\n self.active = True",
"def is_active(self):\n return self.status == ACTIVE_USER",
"def activated(self, value: bool) -> None:\n\n if not isinstance(value, bool):\n raise TypeError(f\"<value> should be {bool}, {type(value)} given.\")\n\n self._activated = value",
"def deactivate_user(self, user):\n if user.active:\n user.active = False\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n return False",
"def is_active_user(self):\n\n return self.is_active",
"def set_inactive(self):\n self.active = False",
"def is_active(self) -> bool:\n return self.__is_active",
"def is_active(self):\n return self.user.is_active",
"def deactivate_user(self, user):\n if user.active:\n user.active = False\n return True\n return False",
"def is_active(self) -> bool:\n return self._is_active",
"def set_active(self, employee_id, active):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET is_active = %s '\n 'WHERE id=%s;',\n (active, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise",
"def activated_user(self):\n user = self.signup_user_two()\n user.is_active = True\n user.save()\n return user",
"def set_active(self):\n if self.active is True:\n return\n self.active = True\n self.save()\n self.question_set.update(active=True)",
"def set_activated(self, value: bool) -> \"Logger\":\n\n self.activated = value\n\n return self",
"def update_active(self):\n self.set_active(0)\n self.state = INACTIVE",
"def update_active(self):\n self.set_active(0)\n self.state = INACTIVE",
"def update_active(self):\n self.set_active(0)\n self.state = INACTIVE",
"def update_active(self):\n self.set_active(0)\n self.state = INACTIVE",
"def update_active(self):\n self.set_active(0)\n self.state = INACTIVE",
"def is_active(self) -> bool:\n return self.active == \"active\"",
"def activate(self, *args, **kwargs):\n username = self.validate_key(kwargs.get(\"activation_key\"))\n user = self.get_user(username)\n user.is_active = True\n user.save()\n return user",
"def is_active(self) -> bool:\r\n return self.active",
"def test_activate_active_user(self):\n activate_user(self.user, self.request)\n self.assertEqual(self.user.is_active, True)",
"def inactive(self, inactive):\n\n self._inactive = inactive",
"def active_users(self, active_users):\n\n self._active_users = active_users",
"def activate(self):\n if not self.is_active:\n self.is_active = True\n self.activated_at = datetime.datetime.utcnow()\n import messaging # avoid circular import\n messaging.send_activated_emails(self)\n self.save()",
"def activate(self):\n self._is_active = True",
"def firewall_active(self, firewall_active):\n\n self._firewall_active = firewall_active",
"def is_active(self):\n return self._is_active",
"def is_active(self):\n return self._is_active",
"def is_active(self):\n return self._is_active",
"def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active",
"def show_active_only(self, show_active_only):\n\n self._show_active_only = show_active_only",
"def active(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"active\")",
"def make_active(self, request, queryset):\n queryset.update(is_active=True)",
"def activate(self) -> bool:\n self.active = True\n return self._activate()",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def active(self, active: NetworkWirelessAP | None) -> None:\n if self._active and self._active is not active:\n self._active.shutdown()\n\n self._active = active",
"def is_active(self):\n return self.active",
"def is_active(self):\n return self.active",
"def is_active(self):\n return self.active",
"def is_active(self):\n return self.active",
"def set_inactive(self):\n if self.active is False:\n return\n self.active = False\n self.save()\n self.question_set.update(active=False)",
"def is_active(self):\n return self._active",
"def confirm_login_allowed(self, user):\r\n if not user.is_active:\r\n raise forms.ValidationError(\r\n self.error_messages['inactive'],\r\n code='inactive',\r\n )",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def active(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"active\")",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def is_active(self):\r\n return self.active",
"def active(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"active\")",
"def setactive(self, scriptname):\n code, data = self.__send_command(\n \"SETACTIVE\", [scriptname.encode(\"utf-8\")])\n if code == \"OK\":\n return True\n return False",
"def activate(self):\r\n if self.activation_code == '':\r\n raise ValidationError('The member is already activated')\r\n signer = TimestampSigner()\r\n signer.unsign(self.activation_code, max_age=timedelta(days=2))\r\n self.hidden = False\r\n self.activation_code = ''\r\n self.joined_date = timezone.now()\r\n self.save()",
"def set_active(cls, name=None):\r\n if name is None:\r\n cls.active = True\r\n cls.non_actives = {} # Clear not actives\r\n else:\r\n if name in cls.non_actives:\r\n del cls.non_actives[name]",
"def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)",
"def update_user_active_at(sender, *args, **kwargs):\n if current_user.is_authenticated and not current_user.is_api_user():\n redis_connection.hset(LAST_ACTIVE_KEY, current_user.id, int(time.time()))",
"def active(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"active\")",
"def activate_user(self, activation_key):\n if SHA1_RE.search(activation_key):\n try:\n profile = RegistrationProfile.objects.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n profile.activation_key = \"ALREADY_ACTIVATED\"\n profile.save()\n return user\n\n return False",
"def activate_user(cls, activation_key):\n #from registration.signals import user_activated\n \n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n db = DB_Session()\n if SHA1_RE.search(activation_key):\n query = db.query(RegistrationProfile)\n profile = query.filter(RegistrationProfile.activation_key == activation_key).one()\n if not profile:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = 1\n profile.activation_key = RegistrationProfile.ACTIVATED\n db.flush()\n db.commit()\n db.close()\n #user_activated.send(sender=self.model, user=user)\n return user\n return False",
"def is_activated(self):\n return self._activated",
"def active(self) -> bool:\n return pulumi.get(self, \"active\")",
"def deactivate(self):\n self._is_active = False",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True",
"def is_active(self):\n return True"
] |
[
"0.7834352",
"0.76405424",
"0.6955871",
"0.68170184",
"0.68170184",
"0.68170184",
"0.68170184",
"0.6813176",
"0.6730943",
"0.67180324",
"0.67115015",
"0.67037755",
"0.6689273",
"0.66405123",
"0.6595193",
"0.6499912",
"0.6499744",
"0.6490623",
"0.64750445",
"0.636067",
"0.6323239",
"0.6314528",
"0.6314528",
"0.6314528",
"0.6314528",
"0.61911154",
"0.6111165",
"0.61101836",
"0.60949457",
"0.60643184",
"0.606431",
"0.60483104",
"0.5972042",
"0.5964418",
"0.59213126",
"0.5839513",
"0.58356166",
"0.5798862",
"0.5784943",
"0.57732266",
"0.57697475",
"0.57697475",
"0.57697475",
"0.57697475",
"0.57697475",
"0.57291985",
"0.56698066",
"0.5662989",
"0.5640551",
"0.56378716",
"0.56215227",
"0.56195486",
"0.5596003",
"0.5579196",
"0.55703884",
"0.55703884",
"0.55703884",
"0.55579644",
"0.5549441",
"0.54890984",
"0.54728395",
"0.54649615",
"0.544072",
"0.543695",
"0.54117423",
"0.54117423",
"0.54117423",
"0.54117423",
"0.5393393",
"0.53598505",
"0.5340781",
"0.53377885",
"0.53377885",
"0.53377885",
"0.53377885",
"0.5303156",
"0.5303156",
"0.5303156",
"0.5303156",
"0.5303156",
"0.5279866",
"0.527946",
"0.5273894",
"0.5261845",
"0.52074236",
"0.52052635",
"0.5193351",
"0.5190379",
"0.5180651",
"0.5179598",
"0.5176678",
"0.5168167",
"0.5167821",
"0.515174",
"0.515174",
"0.515174",
"0.515174"
] |
0.7561219
|
3
|
Gets the is_guest of this UserBase. A boolean specifying whether the user is a guest user.
|
def is_guest(self):
return self._is_guest
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def guest(self):\n return self._guest",
"def is_guest(self, is_guest):\n\n self._is_guest = is_guest",
"def get_guest_user():\n\n guest = User.query.filter_by(username=\"guest\").first()\n\n if guest == None:\n guest = User(username=\"guest\",\n password_token=pbkdf2_sha256.hash(\"password\"))\n db.session.add(guest)\n db.session.commit()\n\n return guest",
"def guest(self) -> Optional[GuestContainer]:\n return self._guest",
"def guest_configuration_enabled(self) -> bool:\n return pulumi.get(self, \"guest_configuration_enabled\")",
"def is_superuser(self):\n return self.is_admin",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'",
"def is_guest_sharing_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsGuestSharingEnabled', self.handle))",
"def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True",
"def is_admin(self) -> bool:\n return self._is_admin",
"def is_user_admin(self, user):\n return user == self.created_by",
"def is_admin(self):\n return self._is_admin",
"def is_admin(self):\n return self._is_admin",
"def is_active_user(self):\n\n return self.is_active",
"def is_admin(self):\n return self.admin",
"def user_verified(self):\n return self.user.verified",
"def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]",
"def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False",
"def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False",
"def get_has_password(self, user):\n return True if user.password else False",
"def has_guest(self, key: int) -> bool:\r\n return self.guests.__contains__(key)",
"def is_admin(self):\r\n return self.admin",
"def get_is_portal_enabled(self):\n return self.is_portal_enabled",
"def is_logged_in(self):\n return self.__is_logged_in",
"def logged_in(self):\n return self.user is not None",
"def is_guest_active(self, *guest_of_list: GuestOf) -> bool:\n if len(guest_of_list) == 0:\n return self._is_any_guest_active()\n\n for guest_of in guest_of_list:\n usergroup = self._get_user_group(guest_of)\n if usergroup and usergroup.is_home:\n return True\n\n return False",
"def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False",
"def is_active(self):\n return self.user.is_active",
"def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)",
"def is_regular_user(user):\n return user.is_authenticated()",
"def session(self):\n\n if not hasattr(self, \"_session\"):\n self._session = Session(\"guest\")\n return self._session",
"def is_valid(self):\n return self.user.is_authenticated",
"def is_authenticated(self):\n return self.user is not None and self.state == AuthenticationOptions.authenticated",
"def is_authenticated(self):\n return bool(self._authenticated)",
"def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False",
"def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_authenticated(self):\n return self.authenticated",
"def is_active(self):\n return self.status == ACTIVE_USER",
"def test_guest_user_created(self):\n self.assertFalse(USER_MODEL.objects.all().exists())\n self.fill_session_cart()\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n self.assertTrue(USER_MODEL.objects.get().is_guest)",
"def is_not_admin(user):\n return not user.is_superuser",
"def get_guest_user_from_request(self, req: Request) -> Optional[GuestUser]:\n raw_token = req.headers.get(\n current_app.config[\"GUEST_TOKEN_HEADER_NAME\"]\n ) or req.form.get(\"guest_token\")\n if raw_token is None:\n return None\n\n try:\n token = self.parse_jwt_guest_token(raw_token)\n if token.get(\"user\") is None:\n raise ValueError(\"Guest token does not contain a user claim\")\n if token.get(\"resources\") is None:\n raise ValueError(\"Guest token does not contain a resources claim\")\n if token.get(\"rls_rules\") is None:\n raise ValueError(\"Guest token does not contain an rls_rules claim\")\n if token.get(\"type\") != \"guest\":\n raise ValueError(\"This is not a guest token.\")\n except Exception: # pylint: disable=broad-except\n # The login manager will handle sending 401s.\n # We don't need to send a special error message.\n logger.warning(\"Invalid guest token\", exc_info=True)\n return None\n else:\n return self.get_guest_user_from_token(cast(GuestToken, token))",
"def is_authenticated(self):\r\n return self.authenticated",
"def is_user_admin(request):\n return request.user.is_superuser",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def logged_in(self):\n return self.auth.get_user_by_session() is not None",
"def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False",
"def is_authenticated(self) -> bool:\n return self._authenticated",
"def get_on_tunnel(self):\n return self._is_on_tunnel",
"def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False",
"def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False",
"def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None",
"def verify_user(self):\n verified = False\n if self.user.role.role_name == \"Admin\":\n verified = True\n\n return verified",
"def logged_in(self) -> bool:\n return self._logged_in",
"def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False",
"def is_authenticated(self):\n return bool(get_auth_token())",
"def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')",
"def isAdmin(user):\n return isUserType(user, Admin)",
"def get_isenabled(self):\n return self.isenabled",
"def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )",
"def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"",
"def is_logged_in(self):\n return self.router.token is not None",
"def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False",
"def has_verified_identity(self):\n is_verified = False\n try:\n is_verified = self.user.identity.status\n except ObjectDoesNotExist:\n pass\n return is_verified",
"def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]",
"def is_user_registered(self):\n return IsUserRegisteredRequest(self)",
"def is_user_event(self):\n return self._is_user_event",
"def is_authenticated(self):\n return True #self.authenticated",
"def GetIsDown(self):\n\n return self.isDown",
"def admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True",
"def guest(self, guest):\n if guest is None:\n raise ValueError(\"Invalid value for `guest`, must not be `None`\")\n\n self._guest = guest",
"def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active",
"def is_admin(self, user):\n return user.name in self.admins",
"def check_admin_session(self):\n for session in vms.get_vm_sessions(vm_name=self.vm_name):\n if (\n session.get_console_user()\n and\n session.get_user().get_user_name().startswith(\"admin\")\n ):\n return True\n return False",
"def is_authenticated(self) -> bool:\n return self.requester.uuid is not None",
"def admin(self):\n if self.is_admin:\n return True\n return False",
"def check_is_admin(current_user):\n return current_user['isAdmin'] == True",
"def is_logged_in():\n return 'user' in session",
"def IsPrivilegedUser(user_email, is_admin):\n return is_admin or (user_email and user_email.endswith('@google.com'))",
"def is_authenticated(self):\n return True",
"def is_authenticated(self):\n return True",
"def is_authenticated(self):\n return True",
"def is_authenticated(self):\n return True",
"def is_authenticated(self):\n return True",
"def is_authenticated(self):\n return True",
"def is_authenticated(self):\n return True",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def is_admin(self):\n if self.type == 1:\n return True\n else:\n return False"
] |
[
"0.67751104",
"0.652903",
"0.6333875",
"0.5840743",
"0.5831468",
"0.57090145",
"0.54347944",
"0.53395015",
"0.5297339",
"0.52234644",
"0.5145995",
"0.514241",
"0.5127566",
"0.5127566",
"0.50955236",
"0.50857407",
"0.5083316",
"0.5064882",
"0.5058896",
"0.5041641",
"0.50172037",
"0.50107485",
"0.5009928",
"0.4997323",
"0.49940282",
"0.49795017",
"0.49791917",
"0.49771118",
"0.4947195",
"0.49445486",
"0.49412924",
"0.4904079",
"0.48953864",
"0.48915458",
"0.48830122",
"0.48812297",
"0.4879118",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.487837",
"0.48665902",
"0.48589176",
"0.48548183",
"0.48479038",
"0.48437935",
"0.48398867",
"0.48276848",
"0.48221236",
"0.48172742",
"0.47952735",
"0.478045",
"0.47778878",
"0.47708318",
"0.47550508",
"0.47550216",
"0.4731671",
"0.47306347",
"0.47261333",
"0.47034067",
"0.47017026",
"0.4663371",
"0.46487176",
"0.4622767",
"0.46191716",
"0.46158946",
"0.4598088",
"0.45802268",
"0.45795524",
"0.45729184",
"0.45688224",
"0.4567308",
"0.4563253",
"0.45618993",
"0.45536685",
"0.45431495",
"0.45389143",
"0.4533185",
"0.45313638",
"0.45274082",
"0.45248675",
"0.45172596",
"0.45107412",
"0.45107412",
"0.45107412",
"0.45107412",
"0.45107412",
"0.45107412",
"0.45107412",
"0.44899315",
"0.44841504"
] |
0.78533375
|
0
|
Sets the is_guest of this UserBase. A boolean specifying whether the user is a guest user.
|
def is_guest(self, is_guest):
self._is_guest = is_guest
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def guest(self, guest):\n if guest is None:\n raise ValueError(\"Invalid value for `guest`, must not be `None`\")\n\n self._guest = guest",
"def is_guest(self):\n return self._is_guest",
"def is_admin(self, is_admin):\n\n self._is_admin = is_admin",
"def is_admin(self, is_admin: bool):\n if is_admin is None:\n raise ValueError(\"Invalid value for `is_admin`, must not be `None`\") # noqa: E501\n\n self._is_admin = is_admin",
"def guest(self):\n return self._guest",
"def get_guest_user():\n\n guest = User.query.filter_by(username=\"guest\").first()\n\n if guest == None:\n guest = User(username=\"guest\",\n password_token=pbkdf2_sha256.hash(\"password\"))\n db.session.add(guest)\n db.session.commit()\n\n return guest",
"def set_guest_sharing_enabled(self, bVmGuestSharingEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetGuestSharingEnabled', self.handle, bVmGuestSharingEnabled)",
"def is_billing_admin(self, is_billing_admin):\n\n self._is_billing_admin = is_billing_admin",
"def test_guest_user_created(self):\n self.assertFalse(USER_MODEL.objects.all().exists())\n self.fill_session_cart()\n self.client.post(self.CHECKOUT_URL, self.build_checkout_form())\n self.assertTrue(USER_MODEL.objects.get().is_guest)",
"def guest_configuration_enabled(self) -> bool:\n return pulumi.get(self, \"guest_configuration_enabled\")",
"def set_is_default(self):\n self.is_default = True",
"def set_is_default(self):\n self.is_default = True",
"def is_default(self, is_default):\n\n self._is_default = is_default",
"def is_default(self, is_default: bool):\n\n self._is_default = is_default",
"def is_default(self, is_default):\n # type: (bool) -> None\n\n if is_default is not None:\n if not isinstance(is_default, bool):\n raise TypeError(\"Invalid type for `is_default`, type has to be `bool`\")\n\n self._is_default = is_default",
"def virtual_flag(self, value):\n if not isinstance(value, bool):\n raise TypeError(\"virtual_flag must be bool.\")\n self._virtual_flag = value",
"def is_live(self, is_live):\n # type: (bool) -> None\n\n if is_live is not None:\n if not isinstance(is_live, bool):\n raise TypeError(\"Invalid type for `is_live`, type has to be `bool`\")\n\n self._is_live = is_live",
"def is_user_event(self, is_user_event):\n self._is_user_event = is_user_event",
"def _set_user_authenticated(user_id: int, device_id: int, value: bool = True) -> None:\n client = net_interface.get_user()\n client.is_authenticated = value\n client.user_id = user_id\n client.device_id = device_id",
"def is_flagged(self, is_flagged):\n \n self._is_flagged = is_flagged",
"def SetIsDown(self, isDown):\n\n self.isDown = isDown",
"def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False",
"def set_is_active(self, status):\n if self.role == User.ROLE_ADMIN:\n if not self.last_active_admin():\n self.is_active = (status == User.STATUS_ACTIVE)\n else:\n self.is_active = True\n self.status = User.STATUS_ACTIVE\n else:\n self.is_active = (status == User.STATUS_ACTIVE)",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def promote(self):\n if self.is_admin == True:\n pass\n self.is_admin = True\n User.save(self)",
"def set_user(self, user):\r\n self.user = user",
"def _setForBinding (self, value):\n if not isinstance(value, bool):\n raise TypeError(value)\n self.__forBinding = value\n return value",
"def super_admin(self, super_admin):\n\n self._super_admin = super_admin",
"def set_is_portal_enabled(self, is_portal_enabled):\n self.is_portal_enabled = is_portal_enabled",
"def is_ghost(self, is_ghost):\n\n self._is_ghost = is_ghost",
"def is_private(self, is_private):\n\n self._is_private = is_private",
"def is_guest_sharing_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsGuestSharingEnabled', self.handle))",
"def invalid_admin_state(isadmin):\n if isinstance(isadmin, bool):\n return False\n return True",
"def is_superuser(self):\n return self.is_admin",
"def set_cuda(self, is_cuda):\n self.is_cuda = is_cuda",
"def set(self, boolean):\n self._val = boolean",
"def set(self, attr, value=True):\n if type(value) == bool:\n self.__dict__['_'+attr] = value\n print attr, \"set to\", value\n else:\n print 'Value must be a bool, either \"True\" or \"False\" (no quotes)!'",
"def create_superuser(self, email, age, gender, password):\n user = self.create_user(\n email,\n password=password,\n age=age,\n gender=gender,\n )\n user.is_admin = True\n user.save(using=self._db)\n return user",
"def boolean(self, boolean):\n\n self._boolean = boolean",
"def set_user(self, user: User):\n self.__user = user",
"def set_user(self, user):\n self._user = user",
"def setAllowDelete(self, value, **kwa):\n\n if type(value) == BooleanType:\n self.fgField.allow_delete = value\n else:\n self.fgField.allow_delete = value == '1'",
"def init_flag(self, value):\n if not isinstance(value, bool):\n raise TypeError(\"init_flag must be bool.\")\n self.set_init_flag(value)\n self._init_flag = value",
"def guest(self) -> Optional[GuestContainer]:\n return self._guest",
"def is_guest_active(self, *guest_of_list: GuestOf) -> bool:\n if len(guest_of_list) == 0:\n return self._is_any_guest_active()\n\n for guest_of in guest_of_list:\n usergroup = self._get_user_group(guest_of)\n if usergroup and usergroup.is_home:\n return True\n\n return False",
"def set_is_max(self, is_max):\n self.__is_max = is_max",
"def create_superuser(self, email, date_of_birth, password):\n user = self.create_user(email,\n password=password,\n date_of_birth=date_of_birth\n )\n user.is_admin = True\n user.save()\n return user",
"def is_muted(self, is_muted):\n # type: (bool) -> None\n\n if is_muted is not None:\n if not isinstance(is_muted, bool):\n raise TypeError(\"Invalid type for `is_muted`, type has to be `bool`\")\n\n self._is_muted = is_muted",
"def ipv6(self, ipv6):\n\n self._ipv6 = ipv6",
"def login_in_guest(self, sUserName, sUserPassword, nFlags = 0):\n\t\treturn Job(SDK.PrlVm_LoginInGuest(self.handle, sUserName, sUserPassword, nFlags)[0])",
"def _set_is_open_to_false():\r\n type(self).__is_open = False\r\n password_window.destroy()",
"def _json_restore_guest_password(self, value):\r\n if not isinstance(value, dict):\r\n raise SDKException('Subclient', '101')\r\n\r\n self._json_restore_guest_password = {\r\n \"userName\": value.get(\"user_name\", ''),\r\n \"password\": value.get(\"password\", '')\r\n }",
"def is_featured(self, is_featured):\n self._is_featured = is_featured",
"def _start_guestfs(self):\n if self.guestfs is None:\n self.guestfs = guestfs.GuestFS()\n self.guestfs.add_drive_opts(self.disks[0], format='qcow2', readonly=0)\n self.guestfs.launch()\n\n partition = self.settings['partition']\n if isinstance(partition, int):\n partition_list = self.guestfs.list_partitions()\n partition_name = partition_list[partition]\n else:\n partition_name = partition\n self.guestfs.mount_options(\"\", partition_name, \"/\")",
"def SetCurrentUser(self, email, user_id='123456', is_admin=False):\n email = email or ''\n user_id = user_id or ''\n is_admin = '1' if is_admin else '0'\n self.testbed.setup_env(user_is_admin=is_admin,\n user_email=email,\n user_id=user_id,\n overwrite=True)",
"def add_guest_module_config(self, module='', config={}):\n\n self._set_config(module, config, 'guest')\n return self",
"def activated(self, value: bool) -> None:\n\n if not isinstance(value, bool):\n raise TypeError(f\"<value> should be {bool}, {type(value)} given.\")\n\n self._activated = value",
"def deactivate_user(self, user):\n if user.active:\n user.active = False\n return True\n return False",
"def git_password_user_attribute(self, git_password_user_attribute):\n self._git_password_user_attribute = git_password_user_attribute",
"def set_has_fan(self, value: bool = True):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"has_fan\", value))\r\n self._has_fan = value",
"def promote_user(self, username):\n parser_promote.add_argument('isadmin', choices=[\"True\", \"False\"],\n required=True, nullable=False,\n help=\"(Accepted values: True, False)\"\n )\n args = parser_promote.parse_args()\n isAdmin = request.json.get('isadmin')\n\n query = \"\"\"UPDATE users SET isadmin=%s WHERE username=%s\"\"\"\n values = isAdmin, username\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return True",
"def is_ephemeral(self, is_ephemeral):\n self._is_ephemeral = is_ephemeral",
"def has_guest(self, key: int) -> bool:\r\n return self.guests.__contains__(key)",
"def toggle_active(self, user):\n user.active = not user.active\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True",
"def toggle_active(self, user):\n user.active = not user.active\n return True",
"def set_visible(self, is_visible):\n self._data['is_visible'] = 1 if is_visible else 0",
"def is_user_admin(self, user):\n return user == self.created_by",
"def send_user_invitation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_user_invitation\")",
"def send_user_invitation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"send_user_invitation\")",
"def create_superuser(self,email,password):\n user = self.create_user(email,password)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user",
"def sr_login_as_guest(self, sim_type=None):\n self.sr_sim_type_set(sim_type)\n self.cookie_jar.clear()\n # Get a cookie\n self.sr_get(\"authState\")\n self.sr_get(\"authGuestLogin\", {\"simulation_type\": self.sr_sim_type})\n return self._verify_and_save_uid()",
"def set_is_default_org(self, is_default_org):\n self.is_default_org = is_default_org",
"def set_gateway(self, bool_value):\n self.chkbtn_gateway.set(bool_value)",
"def set_is_staff(self, role):\n self.is_staff = (role != User.ROLE_USER)",
"def deactivate_user(self, user):\n if user.active:\n user.active = False\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n return False",
"def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield",
"def session(self):\n\n if not hasattr(self, \"_session\"):\n self._session = Session(\"guest\")\n return self._session",
"def test_active_guest_allowed(self) -> None:\n\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200,\n payload={\n \"active\": True,\n \"sub\": SUBJECT,\n \"scope\": \" \".join([MATRIX_GUEST_SCOPE, MATRIX_DEVICE_SCOPE]),\n \"username\": USERNAME,\n },\n )\n )\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n requester = self.get_success(\n self.auth.get_user_by_req(request, allow_guest=True)\n )\n self.http_client.get_json.assert_called_once_with(WELL_KNOWN)\n self.http_client.request.assert_called_once_with(\n method=\"POST\", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY\n )\n self._assertParams()\n self.assertEqual(requester.user.to_string(), \"@%s:%s\" % (USERNAME, SERVER_NAME))\n self.assertEqual(requester.is_guest, True)\n self.assertEqual(\n get_awaitable_result(self.auth.is_server_admin(requester)), False\n )\n self.assertEqual(requester.device_id, DEVICE)",
"def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def user(self, user):\n\n self._user = user",
"def gender(self, gender):\n\n self._gender = gender",
"def gender(self, gender):\n\n self._gender = gender",
"def gender(self, gender):\n\n self._gender = gender",
"def gender(self, gender):\n\n self._gender = gender",
"def is_bot(self, is_bot):\n\n self._is_bot = is_bot",
"def set_guest_sharing_auto_mount(self, bVmGuestSharingAutoMount):\n\t\tcall_sdk_function('PrlVmCfg_SetGuestSharingAutoMount', self.handle, bVmGuestSharingAutoMount)",
"def set_on_tunnel(self, status: bool):\n self._is_on_tunnel = status",
"def set_admin_password(self, instance, new_pass):\n pass",
"def set_admin_password(self, instance, new_pass):\n raise NotImplementedError()",
"def is_admin(self) -> bool:\n return self._is_admin",
"def create_superuser(self, email, password):\n user = self.create_user(email, password=password)\n user.is_admin = True\n user.save(using=self._db)\n return user",
"def is_protected(self, is_protected):\n \"\"\"\n if is_protected is None:\n raise ValueError(\"Invalid value for `is_protected`, must not be `None`\")\n \"\"\"\n\n self.container['is_protected'] = is_protected"
] |
[
"0.63713956",
"0.63543206",
"0.5549084",
"0.52629703",
"0.50986946",
"0.48912513",
"0.47999674",
"0.47975162",
"0.474734",
"0.4700092",
"0.46394247",
"0.46394247",
"0.4637681",
"0.46297148",
"0.45938712",
"0.4591176",
"0.45438093",
"0.4535325",
"0.45209828",
"0.45110002",
"0.4494369",
"0.44848543",
"0.44292068",
"0.44109356",
"0.4408892",
"0.43858194",
"0.43818155",
"0.43723354",
"0.43681446",
"0.43606648",
"0.4344984",
"0.43420783",
"0.43397474",
"0.43323416",
"0.43322802",
"0.43249837",
"0.43175665",
"0.43146795",
"0.42592818",
"0.42385116",
"0.42349473",
"0.42276755",
"0.42253488",
"0.4213783",
"0.42118856",
"0.4207",
"0.41856554",
"0.4178355",
"0.41633195",
"0.41497323",
"0.41413862",
"0.41357368",
"0.4122307",
"0.40940586",
"0.40618196",
"0.4060638",
"0.40598717",
"0.40569255",
"0.40566838",
"0.40558666",
"0.40539855",
"0.40508598",
"0.40508404",
"0.40422642",
"0.40387014",
"0.40190455",
"0.4012609",
"0.40026385",
"0.40026385",
"0.3997731",
"0.39971462",
"0.39927468",
"0.3991159",
"0.39895815",
"0.39843762",
"0.3983847",
"0.39833435",
"0.39823243",
"0.39778608",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3964942",
"0.3961851",
"0.3961851",
"0.3961851",
"0.3961851",
"0.39581674",
"0.3953969",
"0.39506075",
"0.39485353",
"0.39346904",
"0.39296207",
"0.3928996",
"0.392857"
] |
0.82178175
|
0
|
Gets the timezone of this UserBase. The time zone of the user.
|
def timezone(self):
return self._timezone
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def timezone(self):\n return self._tz",
"def timezone(self) -> str:\n return self._tz",
"def timezone(self):\n return self.data.get(\"timezone\")",
"def TimeZone(self):\n if self.force_auto_sync:\n self.get('TimeZone')\n return self._TimeZone",
"def get_timezone(self) -> str:\n return self.AD.time_zone",
"def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")",
"def time_zone(self):\n # type: () -> string_types\n return self._time_zone",
"def timezone(self):\n tz_data = self._router_request(\n self._make_request_data(\n 'getTimeZone',\n data=dict()\n )\n )\n\n return tz_data['data']",
"def timezone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"timezone\")",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def timezone():\n\n return time.timezone",
"def time_zone(self):\n\n\t\tg = geocoders.GoogleV3()\n\n\t\t#Gives the name of the timezone, ex: Africa/Luanda\n\t\ttimezone_name = str(g.timezone((self.latitude_value(), self.longitude_value())))\n\n\t\t#Returns the numeric value of the timezone, ex: +0100\n\t\treturn int(pytz.timezone(timezone_name).localize(datetime.datetime(2011,1,1)).strftime('%z'))/100",
"def time_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"time_zone\")",
"def local_tz(self):\n return pytz.timezone(self.calendar.timezone)",
"def query_timezone(self):\n return self._query_timezone",
"def get_timezone():\n localTimezone = request.args.get('timezone')\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n userId = request.args.get('login_as')\n localTimezone = users[int(userId)]['timezone']\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n return app.config['BABEL_DEFAULT_TIMEZONE']",
"def get_timezone():\n return dates.get_timezone(_get_tz())",
"async def fetch_user_tz(self, user_id: uuid.UUID):\n olson_tz = await self.fetchval(\n \"select timezone from users where id = $1\", user_id\n )\n\n if not olson_tz:\n return pytz.timezone(\"UTC\")\n\n return pytz.timezone(olson_tz)",
"def tz(self) -> tzinfo | None:\n # GH 18595\n return getattr(self.dtype, \"tz\", None)",
"def tzinfo(self) -> tzinfo | None:\n return self.tz",
"def time_zone_name(self):\n return icemac.addressbook.preferences.utils.get_time_zone_name()",
"def tz(self, tz):\n return timezones.maybe_get_tz('dateutil/' + tz)",
"def get_timezone(time_zone=''):\n return pytz.timezone(time_zone)",
"def now(self):\n if 'timezone' in self._data:\n return pytz.utc.localize(datetime.datetime.utcnow()).astimezone(pytz.timezone(self._data['timezone']))\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())",
"def grouping_timezone(self):\n return tz.gettz(self._summariser.grouping_time_zone)",
"def user_timezone(dt, user):\n\ttz = get_config('TIME_ZONE', 'UTC')\n\tif user.is_authenticated():\n\t\tif user.profile.timezone:\n\t\t\ttz = user.profile.timezone\n\ttry:\n\t\tresult = dt.astimezone(pytz.timezone(tz))\n\texcept ValueError:\n\t\t# The datetime was stored without timezone info, so use the\n\t\t# timezone configured in settings.\n\t\tresult = dt.replace(tzinfo=pytz.timezone(get_config('TIME_ZONE', 'UTC'))) \\\n\t\t\t.astimezone(pytz.timezone(tz))\n\treturn result",
"def default(self):\n\t\t\n\t\tif self.no_dbus: return\n\t\t\n\t\treturn self.TimeZoneProperties.Get('(ss)', BUS_NAME, 'Timezone')",
"def get_zone(self):\n return self.project.get_flow().get_zone_of_object(self)",
"def recurrence_time_zone(self):\n return self.__recurrence_time_zone",
"def zone(self) -> str:\n return pulumi.get(self, \"zone\")",
"def zone(self) -> str:\n return pulumi.get(self, \"zone\")",
"def _get_tz():\n return 'UTC'",
"def zone(self):\n return self._zone",
"def zone(self) -> str:\n return self._zone",
"def utc(self):\n return self._utc",
"def getTimezone(profile):\n try:\n return timezone(profile['timezone'])\n except:\n return None",
"def getZoneId(self):\n return self.zoneId",
"def GetTimeAndZone(self):\n return self.hour, self.minute, self.second, self.zDirection, self.zOffset",
"def tzoffset(self):\n return _tzoffset(self._tz, self._t)",
"def timezone():\n \n pass",
"def get_tz(self):\n for _, element in etree.iterparse(self.source):\n if element.tag == TIMEZONE:\n return float(element.text or 0.0)",
"def local_zone():\n return get_localzone()",
"def timezoneNaive(self):\n try:\n return self._timezone_naive\n except AttributeError:\n return None",
"def getTaskZoneId(self):\n return self.getZoneId()",
"def as_timezone(self, tz):\n offset = tz_to_offset(tz)\n if TIMEZONES.get(tz, {}).get('dst') and is_dst(self):\n offset += 1\n offset_secs = offset * 3600\n return DateTime(*time.gmtime(self.unixtime() + offset_secs))",
"def dt_tz(self):\n return DateTimeDefault.register(pandas.Series.dt.tz)(self)",
"def original_end_time_zone(self):\n if \"originalEndTimeZone\" in self._prop_dict:\n return self._prop_dict[\"originalEndTimeZone\"]\n else:\n return None",
"def get_timzone_offset(self, timezone):\n raise NotImplementedError",
"def get_timezone(timezone: str=None) -> datetime.tzinfo:\n if timezone[0] in ('+', '-'):\n timezone = 'UTC' + timezone\n result = tz.gettz(timezone)\n if result is not None and not hasattr(result, '_timezone_'):\n setattr(result, '_timezone_', timezone[3:] if timezone.startswith('UTC') and len(timezone) > 3 else timezone)\n return result",
"def get_timezone_info(self, timezone_name):\n try:\n return self._db[timezone_name]\n except KeyError:\n raise TimezoneNotFoundError(timezone_name)",
"def get_time(self):\n return datetime.datetime.now(self.time_zone)",
"def time_zones(self) -> localedata.LocaleDataDict:\n return self._data['time_zones']",
"def timetz(self):\n return time(\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self._tzinfo,\n fold=self.fold,\n )",
"def timezone(self, latitude, longitude):\n root = self._api_call('GET', 'timezone', lat=latitude, lng=longitude)\n return { 'timezoneId': root[0].find('timezoneId').text,\n 'rawOffset': root[0].find('rawOffset').text, }",
"def tzname(self):\n if self._tzinfo is None:\n return None\n name = self._tzinfo.tzname(self._realized_if_concrete_tzinfo())\n _check_tzname(name)\n return name",
"def tzname(self):\n if self._tzinfo is None:\n return None\n name = self._tzinfo.tzname(None)\n _check_tzname(name)\n return name",
"def access_zone(self):\n return self._access_zone",
"def get_slack_user_timezone(email):\n\n users = get_slack_users()\n for member in users:\n if member.get('profile').get('email') == email:\n return member.get('tz')\n return ''",
"def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> str:\n return pulumi.get(self, \"zone_id\")",
"def created_time_utc(self) -> str:\n return pulumi.get(self, \"created_time_utc\")",
"def created_time_utc(self) -> Optional[str]:\n return pulumi.get(self, \"created_time_utc\")",
"def convertToUsersTimeZone(self, timestamp):\n user = self.zport.dmd.ZenUsers.getUserSettings()\n if user.timezone:\n utc_dt = pytz.utc.localize(datetime.utcfromtimestamp(int(timestamp)))\n tz = pytz.timezone(user.timezone)\n tval = tz.normalize(utc_dt.astimezone(tz))\n return tval.strftime(convertJsTimeFormatToPy(user.dateFormat+\" \"+user.timeFormat))\n return isoDateTime(timestamp)",
"def _get_tzinfo(zonelabel):\n return moment.tzinfo(zonelabel) if zonelabel else _get_global_tz()",
"def get_timezone():\n try:\n for line in open('/etc/sysconfig/clock'):\n field, value = line.split('=')\n if field.strip() == 'ZONE':\n return value.replace('\"', '').strip()\n return \"\"\n except IOError:\n return \"\"",
"def user_country(self):\n return utils.to_country(lib.sp_session_user_country(self._sp_session))",
"def get_timezone(latitude, longitude):\n tf = TimezoneFinder()\n return pytz.timezone(tf.timezone_at(lng=longitude, lat=latitude))",
"def dst(self):\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.dst(self._realized_if_concrete_tzinfo())\n _check_utc_offset(\"dst\", offset)\n return offset",
"def availability_zone(self) -> str:\n return pulumi.get(self, \"availability_zone\")",
"def utcoffset(self):\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.utcoffset(self._realized_if_concrete_tzinfo())\n _check_utc_offset(\"utcoffset\", offset)\n return offset",
"def zone_id(self) -> Optional[str]:\n return pulumi.get(self, \"zone_id\")",
"def __get_timezone_offset(self, timestamp):\n try:\n ts = int(timestamp)\n except ValueError:\n LOG.error(\"__get_timezone_offset() given non-numeric input {}\".format(timestamp))\n if self.__tzname is None:\n r = self.ctx.p4.run('counter', '-u', p4gf_const.P4GF_COUNTER_TIME_ZONE_NAME)\n value = p4gf_util.first_value_for_key(r, 'value')\n if value == '0' or value is None:\n # Upgrade from an EA system, perhaps, in which the upgrade counters\n # have been set but the later changes where not applied during init.\n msg = _(\"Counter '{}' not set, using UTC as default.\"\n \" Change this to your Perforce server's time zone.\") \\\n .format(p4gf_const.P4GF_COUNTER_TIME_ZONE_NAME)\n LOG.warn(msg)\n value = NTR('UTC')\n self.__tzname = value\n try:\n mytz = pytz.timezone(self.__tzname)\n except pytz.exceptions.UnknownTimeZoneError:\n LOG.warn(\"Time zone name '{}' unrecognized, using UTC as default\".format(self.__tzname))\n mytz = pytz.utc\n LOG.debug(\"__get_timezone_offset({}) with {}\".format(ts, mytz))\n dt = datetime.datetime.fromtimestamp(ts, tz=pytz.utc)\n ct = dt.astimezone(mytz)\n return ct.strftime('%z')",
"def original_start_time_zone(self):\n if \"originalStartTimeZone\" in self._prop_dict:\n return self._prop_dict[\"originalStartTimeZone\"]\n else:\n return None",
"def astimezone(self, tz=LOCAL):\n if tz is None:\n tz = LOCAL\n tz = parser.get_timezone(tz)\n return super(self.__class__, self).astimezone(tz)",
"def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")",
"def zone_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"zone_id\")",
"def created_time_utc(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"created_time_utc\")",
"def dst(self):\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.dst(None)\n _check_utc_offset(\"dst\", offset)\n return offset",
"def created_time_utc(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_time_utc\")",
"def _get_local_tz(module, timezone='UTC'):\n if platform.system() == 'Linux':\n timedatectl = get_bin_path('timedatectl')\n if timedatectl is not None:\n rcode, stdout, stderr = module.run_command(timedatectl)\n if rcode == 0 and stdout:\n line = _findstr(stdout, 'Time zone')\n full_tz = line.split(\":\", 1)[1].rstrip()\n timezone = full_tz.split()[0]\n return timezone\n else:\n module.warn('Incorrect timedatectl output. Timezone will be set to UTC')\n else:\n if os.path.exists('/etc/timezone'):\n timezone = get_file_content('/etc/timezone')\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n elif platform.system() == 'SunOS':\n if os.path.exists('/etc/default/init'):\n for line in get_file_content('/etc/default/init', '').splitlines():\n if line.startswith('TZ='):\n timezone = line.split('=', 1)[1]\n return timezone\n else:\n module.warn('Could not find /etc/default/init. Assuming UTC')\n\n elif re.match('^Darwin', platform.platform()):\n systemsetup = get_bin_path('systemsetup')\n if systemsetup is not None:\n rcode, stdout, stderr = module.execute(systemsetup, '-gettimezone')\n if rcode == 0 and stdout:\n timezone = stdout.split(':', 1)[1].lstrip()\n else:\n module.warn('Could not run systemsetup. Assuming UTC')\n else:\n module.warn('Could not find systemsetup. Assuming UTC')\n\n elif re.match('^(Free|Net|Open)BSD', platform.platform()):\n if os.path.exists('/etc/timezone'):\n timezone = get_file_content('/etc/timezone')\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n elif platform.system() == 'AIX':\n aix_oslevel = int(platform.version() + platform.release())\n if aix_oslevel >= 61:\n if os.path.exists('/etc/environment'):\n for line in get_file_content('/etc/environment', '').splitlines():\n if line.startswith('TZ='):\n timezone = line.split('=', 1)[1]\n return timezone\n else:\n module.warn('Could not find /etc/environment. Assuming UTC')\n else:\n module.warn('Cannot determine timezone when AIX os level < 61. Assuming UTC')\n\n else:\n module.warn('Could not find /etc/timezone. Assuming UTC')\n\n return timezone",
"def zone_name(self):\n return self._zone_name",
"def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")",
"def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")",
"def get_current_zone() -> Zone:\n return services.current_zone()",
"def shn_user_utc_offset():\n\n if auth.is_logged_in():\n return db(db.auth_user.id == session.auth.user.id).select(db.auth_user.utc_offset, limitby=(0, 1)).first().utc_offset\n else:\n try:\n offset = db().select(db.s3_setting.utc_offset, limitby=(0, 1)).first().utc_offset\n except:\n offset = None\n return offset",
"def get_tz_offset(self) -> float:\n return self.AD.tz.utcoffset(self.datetime()).total_seconds() / 60",
"def GetAuditedSystemTimezone():\n\n Timezone = False\n try:\n Timezone = os.path.realpath(os.path.join(ROOT_PATH, \"etc/localtime\"))\n Timezone = Timezone.split(\"/\")\n except Exception as e:\n PrintAndLog(u\"Cannot read the timezone\" + str(e.args).decode(\"utf-8\"), \"ERROR\")\n\n return Timezone[-2] + \"/\" + Timezone[-1]",
"def tzname (self, dt):\n return self.__tzName",
"def display_tzname(self):\n return settings.TIME_ZONES_BY_LANG.get(self.language, settings.TIME_ZONE)",
"def utcoffset(self):\n if self._tzinfo is None:\n return None\n offset = self._tzinfo.utcoffset(None)\n _check_utc_offset(\"utcoffset\", offset)\n return offset",
"def availability_zone(self) -> Optional[str]:\n return pulumi.get(self, \"availability_zone\")",
"def local_timezone() -> Timezone | FixedTimezone:\n return get_local_timezone()",
"def start_hour_utc(self) -> int:\n return pulumi.get(self, \"start_hour_utc\")"
] |
[
"0.7857188",
"0.7602793",
"0.75864065",
"0.7564862",
"0.7543805",
"0.7350593",
"0.7301738",
"0.7264471",
"0.71459556",
"0.71003807",
"0.7097807",
"0.68156534",
"0.68122673",
"0.6797889",
"0.6775188",
"0.6735393",
"0.6680171",
"0.66326815",
"0.65203756",
"0.6517433",
"0.65140307",
"0.6435843",
"0.6274222",
"0.60446715",
"0.6034857",
"0.6012215",
"0.6001349",
"0.5965143",
"0.59490776",
"0.5936855",
"0.5936855",
"0.5930653",
"0.5878761",
"0.58691174",
"0.5850661",
"0.58064264",
"0.5793635",
"0.576251",
"0.5747761",
"0.56895816",
"0.56674224",
"0.5650921",
"0.5606433",
"0.5597839",
"0.5578214",
"0.5569189",
"0.5568294",
"0.55423844",
"0.55399215",
"0.5536928",
"0.5534168",
"0.5529727",
"0.5517651",
"0.55084735",
"0.5498491",
"0.5486723",
"0.54723144",
"0.5455046",
"0.5429202",
"0.5429202",
"0.5429202",
"0.5429202",
"0.5429202",
"0.5425535",
"0.5411567",
"0.5391172",
"0.5387301",
"0.5383154",
"0.53581494",
"0.53496253",
"0.53454447",
"0.5321236",
"0.5314853",
"0.53028697",
"0.5301659",
"0.5281775",
"0.5248128",
"0.5240171",
"0.5240171",
"0.5240171",
"0.5240171",
"0.5232785",
"0.52326804",
"0.52267927",
"0.52191716",
"0.5211465",
"0.52090263",
"0.52090263",
"0.52066386",
"0.5196129",
"0.5187684",
"0.5179017",
"0.5174092",
"0.5164558",
"0.51558733",
"0.51319176",
"0.51147217",
"0.5104545"
] |
0.7784589
|
3
|
Sets the timezone of this UserBase. The time zone of the user.
|
def timezone(self, timezone):
self._timezone = timezone
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_timezone():\n tz_name = request.vars.name\n # Validates the name.\n from pytz import all_timezones_set\n if tz_name in all_timezones_set:\n session.user_timezone = tz_name\n # If the user is logged in, sets also the timezone for the user.\n # Otherwise, it can happen that a user expires a cookie, then click on edit.\n # When the user is presented the edit page, the translation is done according to UTC,\n # but when the user is done editing, due to autodetection, the user is then in\n # it's own time zone, and the dates of an assignment change.\n # This really happened.\n if auth.user is not None:\n db.auth_user[auth.user.id] = dict(user_timezone = tz_name)\n logger.info(\"Set timezone to: %r\" % tz_name)\n else:\n logger.warning(\"Invalid timezone received: %r\" % tz_name)",
"def set_timezone():\n tz_name = request.vars.name\n # Validates the name.\n from pytz import all_timezones_set\n if tz_name in all_timezones_set:\n session.user_timezone = tz_name\n # If the user is logged in, sets also the timezone for the user.\n # Otherwise, it can happen that a user expires a cookie, then click on edit.\n # When the user is presented the edit page, the translation is done according to UTC,\n # but when the user is done editing, due to autodetection, the user is then in\n # it's own time zone, and the dates of an assignment change.\n # This really happened.\n if auth.user is not None:\n db.auth_user[auth.user.id] = dict(user_timezone = tz_name)\n logger.info(\"Set timezone to: %r\" % tz_name)\n else:\n logger.warning(\"Invalid timezone received: %r\" % tz_name)",
"def set_timezone(self, to_tz):\n self.startdate = to_tz.localize(self.startdate.replace(tzinfo=None))\n self.enddate = to_tz.localize(self.enddate.replace(tzinfo=None))\n self.timezone = to_tz",
"def time_zone(self, time_zone):\n\n self._time_zone = time_zone",
"def set_timezone(conn, timezone):\n with Tx(conn) as c:\n c.execute('SET timezone = %s', (timezone,))",
"def update(self, request, *args, **kwargs):\n if not settings.DEBUG:\n log_msg = \"User %s setting timezone to %s\"\n logger.info(log_msg % (request.user.id, request.data.get('timezone', None)))\n request.data['user'] = request.user.id\n return super(UserProfileViewSet, self).update(request, *args, **kwargs)",
"def set(self, tzone):\n\t\t\n\t\tif self.no_dbus: return\n\t\t\n\t\tself.TimeZone.SetTimezone(\n\t\t\t'(sb)',\n\t\t\ttzone,\n\t\t\tTrue # User interaction\n\t\t)",
"def set_time_zone(self, tz_str): # TODO Figure out how to implement this as a nonfeature property\n if tz_str in pytz.all_timezones:\n self._data['timezone'] = tz_str\n else:\n raise InvalidTimezone()",
"def timezone(self):\n return self._tz",
"def timezone(self):\n return self._timezone",
"def timezone(self):\n return self._timezone",
"def timezone(self):\n return self._timezone",
"def sync_timezone(self, sync_timezone):\n\n self._sync_timezone = sync_timezone",
"def timezone(self) -> str:\n return self._tz",
"def cron_time_zone(self, cron_time_zone):\n\n self._cron_time_zone = cron_time_zone",
"def timezone(self):\n return self.data.get(\"timezone\")",
"def TimeZone(self):\n if self.force_auto_sync:\n self.get('TimeZone')\n return self._TimeZone",
"async def set_time_zone(tz_name: str) -> pytz.tzinfo:\n\n try:\n tz = pytz.timezone(tz_name or \"UTC\")\n except pytz.UnknownTimeZoneError:\n raise InvalidTimeZoneError(tz_name)\n\n # await db.update_system_field(conn, tz.zone)\n return tz",
"def timezone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"timezone\")",
"def time_zone(self, time_zone):\n # type: (string_types) -> None\n\n if time_zone is not None:\n if not isinstance(time_zone, string_types):\n raise TypeError(\"Invalid type for `time_zone`, type has to be `string_types`\")\n\n self._time_zone = time_zone",
"def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data",
"def astimezone(self, tz=LOCAL):\n if tz is None:\n tz = LOCAL\n tz = parser.get_timezone(tz)\n return super(self.__class__, self).astimezone(tz)",
"def set_timezone(tz=None, deploy=False):\n\n if not tz:\n raise CommandExecutionError(\"Timezone name option must not be none.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/timezone\",\n \"element\": \"<timezone>{}</timezone>\".format(tz),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret",
"def time_zone():\n return timezone('Etc/GMT-10')",
"def process_request(self, request):\n if request.user.is_authenticated():\n if request.user.timezone:\n timezone.activate(pytz.timezone(request.user.timezone))",
"def timezone():\n\n return time.timezone",
"def may_the_timezone_be_with_it(self):\n self.timestamp = pytz.utc.localize(self.timestamp)",
"def timezone():\n \n pass",
"def local_tz(self):\n return pytz.timezone(self.calendar.timezone)",
"def user_timezone(dt, user):\n\ttz = get_config('TIME_ZONE', 'UTC')\n\tif user.is_authenticated():\n\t\tif user.profile.timezone:\n\t\t\ttz = user.profile.timezone\n\ttry:\n\t\tresult = dt.astimezone(pytz.timezone(tz))\n\texcept ValueError:\n\t\t# The datetime was stored without timezone info, so use the\n\t\t# timezone configured in settings.\n\t\tresult = dt.replace(tzinfo=pytz.timezone(get_config('TIME_ZONE', 'UTC'))) \\\n\t\t\t.astimezone(pytz.timezone(tz))\n\treturn result",
"def time_zone(self) -> str:\n return pulumi.get(self, \"time_zone\")",
"def __init__(self, *args, **kwargs):\n\n timezone = kwargs.pop('timezone', False)\n self._column_type = Time(timezone=timezone)\n\n super().__init__(*args, **kwargs)",
"def _timezone_observer(self, timezone):\n if timezone:\n self.timezone = timezone\n for assignment in self.assignments:\n assignment.timezone = timezone",
"async def fetch_user_tz(self, user_id: uuid.UUID):\n olson_tz = await self.fetchval(\n \"select timezone from users where id = $1\", user_id\n )\n\n if not olson_tz:\n return pytz.timezone(\"UTC\")\n\n return pytz.timezone(olson_tz)",
"async def _timezone(self, ctx: commands.Context, tz: str = None):\n self.check_if_exist(ctx.guild)\n\n self.daily_guilds[str(ctx.guild.id)][\"tz\"] = tz\n self.daily_info.update(\"guilds\", self.daily_guilds)\n await ctx.reply(\"New daily timezone is {0}\".format(tz))",
"def time_zone(self):\n # type: () -> string_types\n return self._time_zone",
"def _convertTZ(self):\n tz = timezone.get_current_timezone()\n dtstart = self['DTSTART']\n dtend = self['DTEND']\n if dtstart.zone() == \"UTC\":\n dtstart.dt = dtstart.dt.astimezone(tz)\n if dtend.zone() == \"UTC\":\n dtend.dt = dtend.dt.astimezone(tz)",
"def tz(self, tz):\n return timezones.maybe_get_tz('dateutil/' + tz)",
"def query_timezone(self, query_timezone):\n\n self._query_timezone = query_timezone",
"def __init__(self, *args, **kwargs):\n\n timezone = kwargs.pop('timezone', True)\n self._column_type = CoreTimeStamp(timezone=timezone)\n\n super().__init__(*args, **kwargs)",
"def __init__(self, *args, **kwargs):\n\n timezone = kwargs.pop('timezone', True)\n self._column_type = CoreDateTime(timezone=timezone)\n\n super().__init__(*args, **kwargs)",
"def __init__(self, timezone=None, utc=None):\n\n self._timezone = None\n self._utc = None\n\n if timezone is not None:\n self.timezone = timezone\n if utc is not None:\n self.utc = utc",
"def time_zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"time_zone\")",
"def convertToUsersTimeZone(self, timestamp):\n user = self.zport.dmd.ZenUsers.getUserSettings()\n if user.timezone:\n utc_dt = pytz.utc.localize(datetime.utcfromtimestamp(int(timestamp)))\n tz = pytz.timezone(user.timezone)\n tval = tz.normalize(utc_dt.astimezone(tz))\n return tval.strftime(convertJsTimeFormatToPy(user.dateFormat+\" \"+user.timeFormat))\n return isoDateTime(timestamp)",
"def timezone(self):\n tz_data = self._router_request(\n self._make_request_data(\n 'getTimeZone',\n data=dict()\n )\n )\n\n return tz_data['data']",
"def query_timezone(self):\n return self._query_timezone",
"def disable_user_tzs(self, user_id):\n self.set_user_tzs(user_id, [])",
"def utc(self, utc):\n\n self._utc = utc",
"def get_timezone():\n localTimezone = request.args.get('timezone')\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n userId = request.args.get('login_as')\n localTimezone = users[int(userId)]['timezone']\n if localTimezone in pytz.all_timezones:\n return localTimezone\n else:\n raise pytz.exceptions.UnknownTimeZoneError\n return app.config['BABEL_DEFAULT_TIMEZONE']",
"def get_timezone(self) -> str:\n return self.AD.time_zone",
"def time_zone_name(self):\n return icemac.addressbook.preferences.utils.get_time_zone_name()",
"def dt_tz(self):\n return DateTimeDefault.register(pandas.Series.dt.tz)(self)",
"def tz(self) -> tzinfo | None:\n # GH 18595\n return getattr(self.dtype, \"tz\", None)",
"def time_zone(self):\n\n\t\tg = geocoders.GoogleV3()\n\n\t\t#Gives the name of the timezone, ex: Africa/Luanda\n\t\ttimezone_name = str(g.timezone((self.latitude_value(), self.longitude_value())))\n\n\t\t#Returns the numeric value of the timezone, ex: +0100\n\t\treturn int(pytz.timezone(timezone_name).localize(datetime.datetime(2011,1,1)).strftime('%z'))/100",
"def utcoffset(self, dt):\n raise NotImplementedError(\"tzinfo subclass must override utcoffset()\")",
"def tzinfo(self) -> tzinfo | None:\n return self.tz",
"def __init__(self, case, timezone=pytz.UTC, override_user_id=None):\n self.case = case\n self.timezone = timezone\n self.override_user_id = override_user_id",
"def __init__(self, case, timezone=pytz.UTC, override_user_id=None):\n self.case = case\n self.timezone = timezone\n self.override_user_id = override_user_id",
"def utm_zone_start(self, utm_zone_start):\n\n self._utm_zone_start = utm_zone_start",
"def as_timezone(self, tz):\n offset = tz_to_offset(tz)\n if TIMEZONES.get(tz, {}).get('dst') and is_dst(self):\n offset += 1\n offset_secs = offset * 3600\n return DateTime(*time.gmtime(self.unixtime() + offset_secs))",
"def localtime_for_timezone(value, timezone):\r\n return adjust_datetime_to_timezone(value, settings.TIME_ZONE, timezone)",
"def set_time_by_timezone(df):\n df = set_city_time_by_timezone(df, 1078, 3)\n df = set_city_time_by_timezone(df, 22390, 4)\n df = set_city_time_by_timezone(df, 22430, 4)\n df = set_city_time_by_timezone(df, 22438, 5)\n return df",
"def tz(self, tz: float) -> 'Component':\n return self.translate(tz=tz)",
"def enforce_timezone(self, value):\n try:\n tz = timezone._active.value\n if (self.default_timezone is not None) and not timezone.is_aware(value):\n return timezone.make_aware(value, tz)\n return value\n except AttributeError:\n return super().enforce_timezone(value)",
"def addNewUserWithTimeZone(SID, username, userpass, lastname, firstname, email, additionalname, street, zip, fax, states_id, town, language_id, baseURL, jNameTimeZone):\n return call(\"addNewUserWithTimeZone\", SID, username, userpass, lastname, firstname, email, additionalname, street, zip, fax, states_id, town, language_id, baseURL, jNameTimeZone)",
"def _get_tz():\n return 'UTC'",
"def __init__(self, name=None):\n if name:\n self.name = name\n else:\n self.name = 'UTC'\n\n #Check timezone is valid by trying to instantiate it. May raise error.\n pytz.timezone(self.name)",
"def now(self):\n if 'timezone' in self._data:\n return pytz.utc.localize(datetime.datetime.utcnow()).astimezone(pytz.timezone(self._data['timezone']))\n else:\n return pytz.utc.localize(datetime.datetime.utcnow())",
"def test_property_timezone(self):\n\n timezone = self.location.timezone\n\n self.assertIsInstance(timezone, str)\n self.assertRaises(DataObjectError, \n setattr(self, \"timezone\", \"US/Nowhere\")\n )",
"def setup_datetime(self):\n \n current_date_time = datetime.now()\n timezone_diference = timedelta(hours=-3)\n return timezone(timezone_diference), current_date_time",
"def get_timzone_offset(self, timezone):\n raise NotImplementedError",
"def set_time_override(override_time=datetime.datetime.utcnow()):\r\n utcnow.override_time = override_time",
"def adjust_datetime_to_timezone(value, from_tz, to_tz=None):\r\n if to_tz is None:\r\n to_tz = settings.TIME_ZONE\r\n if value.tzinfo is None:\r\n if not hasattr(from_tz, \"localize\"):\r\n from_tz = pytz.timezone(smart_str(from_tz))\r\n value = from_tz.localize(value)\r\n return value.astimezone(pytz.timezone(smart_str(to_tz)))",
"def timetz(self):\n return time(\n self.hour,\n self.minute,\n self.second,\n self.microsecond,\n self._tzinfo,\n fold=self.fold,\n )",
"def setUTC(self, flag):\n try:\n\n adjtimeFile = \"/etc/adjtime\"\n if self.__mountDir:\n adjtimeFile = self.__mountDir + adjtimeFile\n\n fd = open(adjtimeFile)\n content = fd.read()\n fd.close()\n\n newContent = content\n\n if flag and not \"UTC\" in content:\n if \"LOCAL\" in content:\n newContent = re.sub(\"LOCAL\", \"UTC\", content)\n else:\n newContent += \"UTC\\n\"\n elif not \"LOCAL\" in content:\n if \"UTC\" in content:\n newContent = re.sub(\"UTC\", \"LOCAL\", content)\n else:\n newContent += \"LOCAL\\n\"\n\n fd = open(adjtimeFile, \"w\")\n fd.write(newContent)\n fd.close()\n except Exception as e:\n self.__logger.critical(\"Failed to write UTC configuration\")\n raise ZKVMError(\"POSTINSTALL\", \"TIMEZONE\", \"UTC_CONF\")",
"def set_utc(date_time):\n utc = datetime.timezone(datetime.timedelta(0))\n date_time = date_time.replace(tzinfo=utc)\n return date_time",
"def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)",
"def save(self, *args, **kwargs):\n if not self.user_id or not self.created:\n self.created = datetime.datetime.today()\n self.modified = datetime.datetime.today()\n return super(UserProfile, self).save(*args, **kwargs)",
"def utc(self):\n return self._utc",
"def set_date_tzinfo(d, tz_name=None):\r\n if tz_name and not d.tzinfo:\r\n tz = pytz.timezone(tz_name)\r\n return tz.localize(d)\r\n return d",
"def __init__(self,\n qaisession: typing.Optional[qai_helper.QAISession],\n tz_name: str) -> None:\n self.qaisession = qaisession\n timelib.set_local_timezone(tz_name)\n self._current_date = timelib.loc_nowtime().date()\n self._db_has_changed = True\n self._cachedct: typing.Optional[dict] = None",
"def configure_wifi(self, ssid, password, uid=0, timezone=None):\n extra_params = {}\n if timezone is not None:\n now = datetime.datetime.now(pytz.timezone(timezone))\n offset_as_float = now.utcoffset().total_seconds() / 60 / 60\n extra_params[\"tz\"] = timezone\n extra_params[\"gmt_offset\"] = offset_as_float\n\n return super().configure_wifi(ssid, password, uid, extra_params)",
"def grouping_timezone(self):\n return tz.gettz(self._summariser.grouping_time_zone)",
"def set_utc(df, locale):\n return df.tz_localize('utc').tz_convert(None)",
"def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Time(timezone=kwargs.get(\"timezone\", False))",
"def tzstr(self, tz):\n return 'dateutil/' + tz",
"def tz(self, dist: float) -> \"Mate\":\n self.translate(self.z_dir, dist)\n return self",
"def set_offline(self, tzone):\n\t\t\n\t\tif tzone.startswith(\"Other/\"):\n\t\t\ttzone.replace(\"Other/\",\"\")\n\t\t\n\t\twith open(\"/etc/timezone\", \"w\") as f:\n\t\t\tf.write(tzone + \"\\n\")\n\t\t\t\t\n\t\tif os.path.exists(\"/etc/localtime\"):\n\t\t\tos.remove(\"/etc/localtime\")\n\t\tshutil.copy2(\"/usr/share/zoneinfo/%s\" % (tzone),\"/etc/localtime\")",
"def default(self):\n\t\t\n\t\tif self.no_dbus: return\n\t\t\n\t\treturn self.TimeZoneProperties.Get('(ss)', BUS_NAME, 'Timezone')",
"def original_start_time_zone(self):\n if \"originalStartTimeZone\" in self._prop_dict:\n return self._prop_dict[\"originalStartTimeZone\"]\n else:\n return None",
"def set_user(self, user):\r\n self.user = user",
"def get_db_prep_save(self, value, connection=None):\n\n if value is not None:\n if value.tzinfo is not None:\n ## convert to settings.TIME_ZONE\n value = value.astimezone(default_tz)\n \n value = value.replace(tzinfo=None)\n return super(LocalizedDateTimeField, self).get_db_prep_save(value, connection=connection)",
"def zone(self, zone: str):\n\n self._zone = zone",
"def tzname(self):\n if self._tzinfo is None:\n return None\n name = self._tzinfo.tzname(None)\n _check_tzname(name)\n return name",
"def setTimepoint(self, tp):\n\t\tpass",
"def tz_localize(self, dt):\n if is_datetime(dt):\n # Naive datetime, see\n # https://docs.python.org/3/library/datetime.html#available-types\n if dt.tzinfo == None or dt.tzinfo.utcoffset(dt) == None:\n return dt\n else:\n return dt.astimezone(self.args.tz)\n elif is_date(dt):\n return dt\n else:\n raise ValueError('Expected datetime or date object')"
] |
[
"0.70524657",
"0.70524657",
"0.6869655",
"0.6587702",
"0.650735",
"0.64232886",
"0.63101465",
"0.6221063",
"0.6113634",
"0.6078929",
"0.6078929",
"0.6078929",
"0.58731836",
"0.5856435",
"0.5750898",
"0.57249284",
"0.5719738",
"0.5674356",
"0.5662734",
"0.5613859",
"0.55795693",
"0.55572295",
"0.55502605",
"0.551034",
"0.5493174",
"0.5471477",
"0.54626894",
"0.54453015",
"0.54387164",
"0.5433577",
"0.5409144",
"0.5404137",
"0.54034483",
"0.5385267",
"0.5380742",
"0.53607124",
"0.5339595",
"0.5327935",
"0.5314142",
"0.5287926",
"0.5287107",
"0.528463",
"0.5246568",
"0.51549745",
"0.5053454",
"0.5018281",
"0.5017761",
"0.49987158",
"0.49660507",
"0.4963823",
"0.4961693",
"0.4945764",
"0.49323323",
"0.49309364",
"0.4887925",
"0.4860247",
"0.48454788",
"0.48454788",
"0.48145345",
"0.47957215",
"0.4786721",
"0.47646165",
"0.47634825",
"0.4759689",
"0.47568542",
"0.4741146",
"0.47109285",
"0.46954554",
"0.46745244",
"0.46267048",
"0.4608631",
"0.4595556",
"0.45942616",
"0.45845994",
"0.45800927",
"0.45765543",
"0.45621568",
"0.45449904",
"0.45240915",
"0.45114142",
"0.45086995",
"0.4475615",
"0.44637638",
"0.44636217",
"0.44594818",
"0.44583198",
"0.44478807",
"0.44461882",
"0.44404408",
"0.44295028",
"0.4415057",
"0.43961623",
"0.43938646",
"0.43901855",
"0.43896288",
"0.4387191"
] |
0.7129376
|
4
|
Gets the date_joined of this UserBase. The time the user account was created.
|
def date_joined(self):
    return self._date_joined
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_account_created_date(self):\n return self.account_created_date",
"def getJoiningTime(self):\n return self.__joinTime",
"def created_at(self):\n created_at = self.joined_at\n if created_at is None:\n created_at = DISCORD_EPOCH_START\n \n return created_at",
"def date_joined(self, date_joined):\n\n self._date_joined = date_joined",
"def getUserSignupDate(self):\r\n userinfo = self.getUserInfo()\r\n timestamp = int(float(userinfo[\"signupTimeSec\"]))\r\n return time.strftime(\"%m/%d/%Y %H:%M\", time.gmtime(timestamp))",
"def joined_days(self):\n return (timezone.now() - self.user.date_joined).days",
"def created_date_time(self) -> str:\n return pulumi.get(self, \"created_date_time\")",
"def date_created(self):\n return self._date_created",
"def date_created(self):\n return self._date_created",
"def date_created(self):\n return self._date_created",
"def date_created(self) -> datetime:\n return self._date_created",
"def created_date(self):\n return self._created_date",
"def created_date(self):\n return self._created_date",
"def created_time(self) -> datetime.datetime:\n return self.__created_time",
"def created_time(self) -> datetime.datetime:\n return self.__created_time",
"def date_added(self) -> str:\n return self._date_added.strftime('%Y-%m-%d')",
"def created_on(self):\n return self.get_time(\"created_on\")",
"def get_account_created_date_formatted(self):\n return self.account_created_date_formatted",
"def date_created(self) -> str:\n return pulumi.get(self, \"date_created\")",
"def GetDateCreated(self):\n return str(self.datecreated)",
"def registered_date(self):\n return sa.Column(sa.TIMESTAMP(timezone=False), default=datetime.datetime.utcnow, server_default=sa.func.now())",
"def time_created(self):\n return self._time_created",
"def time_created(self):\n return self._time_created",
"def time_created(self):\n return self._time_created",
"def time_created(self):\n return self._time_created",
"def date_registered(self) -> datetime:\n return datetime.utcfromtimestamp(self.registered)",
"def created_user(self):\n return self._created_user",
"def create_time(self):\n return self._create_time",
"def create_time(self):\n return self._create_time",
"def create_time(self):\n return self._create_time",
"def created_on(self):\n return self._created_on",
"def created_on(self):\n return self._created_on",
"def created_on(self):\n return self._created_on",
"def created_on(self):\n return self._created_on",
"def date_of_birth(self, instance):\r\n return instance.user.profile.date_of_birth",
"def created_timestamp(self):\n return self._created_timestamp",
"def date_added(self):\n return datetime.datetime.fromtimestamp(self.fields['addedDate'])",
"def date_of_birth(self):\n return self.__date_of_birth",
"def get_creation_time(self):\n return self.get_attr('date_created')",
"def created_time_utc(self) -> str:\n return pulumi.get(self, \"created_time_utc\")",
"def get_join_date(handle: str) -> str:\n baby_scraper = Scraper(handle)\n join_date = baby_scraper.api.get_user(handle).created_at\n return join_date",
"def get_date(self):\n return self.date",
"def get_date(self):\n return self.date",
"def get_date(self):\n return self.date",
"def get_date(self):\n return self.date",
"def test_user_creation_old_date_joined(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, 'alice@example.com')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiry_date = datetime_now() - timedelta(settings.ACCOUNT_ACTIVATION_DAYS)\n self.assertGreater(new_user.date_joined, expiry_date)",
"def last_updated_user(self):\n return self._last_updated_user",
"def time_created(self) -> str:\n return pulumi.get(self, \"time_created\")",
"def time_created(self) -> str:\n return pulumi.get(self, \"time_created\")",
"def time_created(self) -> str:\n return pulumi.get(self, \"time_created\")",
"def get_last_update_time(self):\n return self.last_update_time",
"def created_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_time\")",
"def created_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_time\")",
"def date(self):\n return DateTime(self.created)",
"def get_last_visit_timestamp(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n api_response = query_users_table_by_id(khoros_object, 'last_visit_time', user_settings['id'], first_item=True)\n # TODO: Add the ability to parse the timestamp into a datetime object or change the string format\n return api_response['last_visit_time']",
"def get_creation_time(self):\n return self.creation_time",
"def creation_datetime(self) -> datetime:\n return utc_to_local(self._db_data.creation_datetime)",
"def get_created_time(self):\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT created_time FROM sessions WHERE id = ?;',\n (self.sid,))\n created_time = cursor.fetchone()\n cursor.close()\n connection.close()\n return created_time[0]",
"def date(self):\n return self.status.created_at",
"def get_time(self):\n return self.__time",
"def expireDate(self)->datetime:\n return self.firstAccessDate + timedelta(seconds=self.expirePeriodInSeconds)",
"def created_at(self):\n return self._created_at",
"def created_at(self):\n return self._created_at",
"def created_at(self):\n return self._created_at",
"def created_at(self):\n return self._created_at",
"def created_at(self) -> datetime.datetime:\n return self._created_at",
"def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time",
"def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time",
"def created_time_utc(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_time_utc\")",
"def last_update_time(self):\n return self._last_update_time",
"def created_at(self):\n return self._domain.created_at",
"def get_last_time(self):\n \n return self._last",
"def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")",
"def created_at(self) -> str:\n return pulumi.get(self, \"created_at\")",
"def created_at(self):\n return self.getattr('created_at')",
"def expireDate(self)->datetime:\n return datetime.now() + self.secondsLeft",
"def last_date(self):\n if self._last_date is None:\n raise ValueError(\"Run pick() method before access this property\")\n return self._last_date",
"def creation_timestamp(self):\n\n return self.getThisUpdate()",
"def migrations_time(self):\n return self._migrations_time",
"def last_ownership_update_time(self) -> str:\n return pulumi.get(self, \"last_ownership_update_time\")",
"def join_sys_time(self):\n return self._join_sys_time",
"def get_time_date(self):\n return time.strftime(\"%m-%d-%Y %H:%M\")",
"def created_on(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_on\")",
"def team_creation_time_utc(self) -> str:\n return pulumi.get(self, \"team_creation_time_utc\")",
"def expireDate(self)->datetime:\n return self.timeEnd",
"def date_of_birth(self) -> str:\n return self._date_of_birth.strftime('%Y-%m-%d')",
"def date(self):\n return self._date",
"def date(self):\n return self._date",
"def date(self):\n return self._date",
"def creation_date(self) -> str:\n return pulumi.get(self, \"creation_date\")",
"def creation_date(self) -> str:\n return pulumi.get(self, \"creation_date\")",
"def creation_date(self) -> str:\n return pulumi.get(self, \"creation_date\")",
"def dt_last_update(self):\n return self.last_update",
"def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")",
"def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")",
"def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")",
"def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")",
"def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")",
"def create_time(self) -> str:\n return pulumi.get(self, \"create_time\")"
] |
[
"0.62656933",
"0.6219153",
"0.59980553",
"0.59758735",
"0.5934436",
"0.587002",
"0.57954764",
"0.5775895",
"0.5775895",
"0.5775895",
"0.57507694",
"0.57064766",
"0.57064766",
"0.56547153",
"0.56547153",
"0.56422794",
"0.5618498",
"0.5597825",
"0.55175257",
"0.550172",
"0.54831547",
"0.5463635",
"0.5463635",
"0.5463635",
"0.5463635",
"0.54320157",
"0.54100984",
"0.54035217",
"0.54035217",
"0.54035217",
"0.5387795",
"0.5387795",
"0.5387795",
"0.5387795",
"0.5382273",
"0.5328676",
"0.53091484",
"0.53054184",
"0.52879834",
"0.5279328",
"0.52713925",
"0.5258129",
"0.5258129",
"0.5258129",
"0.5258129",
"0.52374935",
"0.52372235",
"0.5223981",
"0.5223981",
"0.5223981",
"0.52184594",
"0.51896423",
"0.51896423",
"0.51673007",
"0.5154345",
"0.51284564",
"0.51223457",
"0.51065123",
"0.5087857",
"0.50834215",
"0.5083224",
"0.50796723",
"0.50796723",
"0.50796723",
"0.50796723",
"0.50591797",
"0.5053564",
"0.5053564",
"0.504626",
"0.5039712",
"0.50152117",
"0.5014135",
"0.5000025",
"0.5000025",
"0.5000025",
"0.49954137",
"0.4985369",
"0.49761987",
"0.49601635",
"0.4958622",
"0.4957348",
"0.49524373",
"0.49472436",
"0.4945957",
"0.4938437",
"0.49265963",
"0.4920798",
"0.49194273",
"0.49194273",
"0.49194273",
"0.49188927",
"0.49188927",
"0.49188927",
"0.49187297",
"0.49176922",
"0.49176922",
"0.49176922",
"0.49176922",
"0.49176922",
"0.49176922"
] |
0.7982348
|
0
|
Sets the date_joined of this UserBase. The time the user account was created.
|
def date_joined(self, date_joined):
    self._date_joined = date_joined
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def date_joined(self):\n return self._date_joined",
"def test_user_creation_old_date_joined(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, 'alice@example.com')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiry_date = datetime_now() - timedelta(settings.ACCOUNT_ACTIVATION_DAYS)\n self.assertGreater(new_user.date_joined, expiry_date)",
"def set_account_created_date(self, account_created_date):\n self.account_created_date = account_created_date",
"def test_unexpired_account_old_date_joined(self):\n self.user_info['date_joined'] = datetime_now(\n ) - timedelta(settings.ACCOUNT_ACTIVATION_DAYS + 1)\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertFalse(profile.activation_key_expired())",
"def save(self, *args, **kwargs):\n if not self.user_id or not self.created:\n self.created = datetime.datetime.today()\n self.modified = datetime.datetime.today()\n return super(UserProfile, self).save(*args, **kwargs)",
"def _set_joined(self, data):\n if self.joined_at is None:\n self.joined_at = parse_joined_at(data)",
"def SetDateCreated(self, date):\n self.datecreated = str(date)",
"def date_created(self, date_created):\n self._date_created = date_created",
"def date_created(self, date_created):\n self._date_created = date_created",
"def created_date(self, created_date):\n self._created_date = created_date",
"def date_created(self, date_created: datetime):\n\n self._date_created = date_created",
"def created_user(self, created_user):\n self._created_user = created_user",
"def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")",
"def date_created(self, date_created):\n\n self._date_created = date_created",
"def date_created(self, date_created):\n\n self._date_created = date_created",
"def date_created(self, date_created):\n\n self._date_created = date_created",
"def date_created(self, date_created):\n \n self._date_created = date_created",
"def created_date(self, created_date):\n\n self._created_date = created_date",
"def created_date(self, created_date):\n\n self._created_date = created_date",
"def created_date(self, created_date):\n\n self._created_date = created_date",
"def created_date(self, created_date):\n\n self._created_date = created_date",
"def created_date(self, created_date):\n\n self._created_date = created_date",
"def joined_days(self):\n return (timezone.now() - self.user.date_joined).days",
"async def on_member_join(self, member: Member):\n\n if not self._is_tracked(member.guild, EventPriority.join):\n return\n\n em = self.em_base(\n member,\n f\"User {member.mention} ({member.name}) joined\",\n EventColors.join.value\n )\n\n em.add_field(\n name=\"Account Creation Timestamp\",\n value=self._get_timestamp()\n )\n\n await self.log_event(em, member.guild, priority=EventPriority.join)",
"def joined_dts(self, joined_dts):\n\n self._joined_dts = joined_dts",
"def set_last_used_on(self):\n self.last_used_on = datetime.now()\n self.save()",
"def expire_date(self, expire_date):\n\n self._expire_date = expire_date",
"def set_account_created_date_formatted(self, account_created_date_formatted):\n self.account_created_date_formatted = account_created_date_formatted",
"def created_at(self):\n created_at = self.joined_at\n if created_at is None:\n created_at = DISCORD_EPOCH_START\n \n return created_at",
"def test_user_creation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(new_user.get_username(), 'alice')\n self.assertEqual(new_user.email, 'alice@example.com')\n self.assertTrue(new_user.check_password('swordfish'))\n self.assertFalse(new_user.is_active)\n\n expiration_date = datetime_now() - timedelta(\n settings.ACCOUNT_ACTIVATION_DAYS\n )\n self.assertGreater(new_user.date_joined, expiration_date)",
"def _user_decorator(create_user):\n @wraps(create_user)\n def register_last_active(*args, **kwargs):\n user = create_user(*args, **kwargs)\n active_time = UserActiveTime(user=user,\n last_active=user.date_joined)\n active_time.save()\n return user, active_time\n\n return register_last_active",
"def set_datetime(self, date):\n self.date = date",
"def date_registered(self) -> datetime:\n return datetime.utcfromtimestamp(self.registered)",
"def get_account_created_date(self):\n return self.account_created_date",
"def datecreated(self, datecreated):\n\n self._datecreated = datecreated",
"async def joined(self, ctx, member: discord.Member):\n await ctx.send('{0.name} joined in {0.joined_at}'.format(member))",
"def created_date_utc(self, created_date_utc):\n\n self._created_date_utc = created_date_utc",
"def save(self, *args, **kwargs):\n self.username = self.username or self.email\n super().save(*args, **kwargs)",
"def getUserSignupDate(self):\r\n userinfo = self.getUserInfo()\r\n timestamp = int(float(userinfo[\"signupTimeSec\"]))\r\n return time.strftime(\"%m/%d/%Y %H:%M\", time.gmtime(timestamp))",
"def set_end_date(self, date):\n pass",
"def set_date(self, date):\n self.date = date",
"def expireDate(self)->datetime:\n return self.firstAccessDate + timedelta(seconds=self.expirePeriodInSeconds)",
"def registered_date(self):\n return sa.Column(sa.TIMESTAMP(timezone=False), default=datetime.datetime.utcnow, server_default=sa.func.now())",
"def create_date(self, create_date):\n\n self._create_date = create_date",
"def create_date(self, create_date):\n\n self._create_date = create_date",
"def create_date(self, create_date):\n\n self._create_date = create_date",
"def getJoiningTime(self):\n return self.__joinTime",
"def date_start(self, date_start):\n\n self._date_start = date_start",
"def set_start_date(self, date):\n pass",
"def set_date(self, date):\n self.date = date\n return",
"def is_joined_days_passed(self, days):\n return timezone.now() >= self.user.date_joined + timedelta(days=days)",
"async def joined(ctx, member: discord.Member):\n await ctx.send('{0.name} joined in {0.joined_at}'.format(member))",
"def update_last_login(sender, user, **kwargs):\n try:\n user_last_login = UserLastLogin.objects.get(username=user.username)\n except UserLastLogin.DoesNotExist:\n user_last_login = UserLastLogin(username=user.username)\n user_last_login.last_login = timezone.now()\n user_last_login.save()",
"async def joined(ctx, member: discord.Member):\n await ctx.send(\"{0.name} joined in {0.joined_at}\".format(member))",
"async def joined(ctx, member: discord.Member):\n await bot.send('{0.name} joined in {0.joined_at}'.format(member))",
"def save(self, *args, **kwargs):\n self.expire_date = self.sent_date + timedelta(days=self.period)\n super(Referral, self).save(*args, **kwargs)",
"def create_user(self, email, password, **extra_fields):\n if not email:\n \traise ValueError('Must provide a valid email address')\n\n now = timezone.now()\n user = self.model(\n email=self.normalize_email(email),\n date_joined=now,\n last_login=now,\n **extra_fields\n ) \n\n user.set_password(password)\n user.save(using=self._db)\n return user",
"def save(self, *args, **kwargs):\r\n\r\n\t\t# if self.has_django_dashboard_access is True:\r\n\t\t# self.is_staff = True\r\n\t\tsuper(User, self).save(*args, **kwargs)",
"def expireDate(self)->datetime:\n return datetime.now() + self.secondsLeft",
"async def on_member_join(self, member: discord.Member) -> None:\n\n await add_user_in_db(member, member.guild)\n\n guild_from_db = await Guilds.get(guild_id=member.guild.id)\n role_saver = guild_from_db.role_saver\n if role_saver:\n user_roles = await UserRoles.get_many(guild_id=member.guild.id, user_id=member.id)\n if user_roles:\n for rol in user_roles:\n role = discord.utils.get(member.guild.roles, id=rol.role_id)\n if role.name == '@everyone':\n continue\n else:\n await member.add_roles(role)\n\n await Profiles.update(user_id=member.id,\n guild_id=member.guild.id,\n set=[\"joins = joins + 1\"])\n await Guilds.update(guild_id=member.guild.id,\n set=[\"day_joins = day_joins + 1\"])\n\n await self.refresh_user_count_channel(member.guild)",
"def _RecordVisitTime(self, mr, now=None):\n now = now or int(time.time())\n if not settings.read_only and mr.auth.user_id:\n user_pb = mr.auth.user_pb\n if (user_pb.last_visit_timestamp <\n now - framework_constants.VISIT_RESOLUTION):\n user_pb.last_visit_timestamp = now\n self.services.user.UpdateUser(mr.cnxn, user_pb.user_id, user_pb)",
"def save(self, *args, **kwargs):\n if not self.id:\n self.created = timezone.now()\n return super(UserActivity, self).save(*args, **kwargs)",
"def start_date(self, start_date):\n self._start_date = start_date",
"def date_created(self) -> datetime:\n return self._date_created",
"def date_finished(self, date_finished):\n self._date_finished = date_finished",
"def user_post_save(sender, instance, created, **kwargs):\n\t\tif created == True:\n\t\t\tup = UserProfile()\n\t\t\tup.user = instance\n\t\t\tup.save()",
"def save(self, *args, **kwargs):\n if not self.id:\n self.create_date = timezone.now()\n self.update_date = timezone.now()\n super().save(*args, **kwargs)",
"async def joined(ctx, member: discord.Member):\n await bot.say('{0.name} joined in {0.joined_at}'.format(member))",
"def save(self, *args, **kwargs):\n if not self.id:\n self.last_msg_time = timezone.now()\n super(WeixinUser, self).save(*args, **kwargs)",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def start_date(self, start_date):\n\n self._start_date = start_date",
"def set_last_played_timestamp(self, date: str):\n self.has_been_played = True\n self.last_played_timestamp = (get_timestamp_from_date(date))",
"def completion_date(self, completion_date):\n\n self._completion_date = completion_date",
"def date_added(self, value: datetime) -> None:\n self._date_added = value",
"def date(self, new_date):\n self._date.date = new_date",
"def create_posted_on_property(self):\n self.posted_on = self.added_on.date",
"def on_joinuser(self, data):\n user_data = {\n 'un': data[3], # nick\n 'ml': data[4], # mod level\n 'st': data[5], # status related\n 'id': data[6], # ezcapechat user id\n 'su': data[7] # ?\n }\n if data[3] == self.users.client.nick:\n self.users.add_client_data(user_data)\n else:\n _user = self.users.add(data[3], user_data)\n print ('%s Joined the room.' % _user.nick)\n\n #BOT\n if (_user.nick.lower() in self.autogreet):\n self.send_public(\"%s, %s\" % (_user.nick, self.autogreet[_user.nick.lower()]))",
"async def joined(member : discord.Member):\n await bot.say('{0.name} joined in {0.joined_at}'.format(member))",
"async def joined(member : discord.Member):\n await bot.say('{0.name} joined in {0.joined_at}'.format(member))",
"async def joined(member : discord.Member):\n await bot.say('{0.name} joined in {0.joined_at}'.format(member))",
"def dep_date(self, dep_date):\n\n self._dep_date = dep_date",
"def setCreatedDate(self, *args):\n return _libsbml.ModelHistory_setCreatedDate(self, *args)",
"def date_time(self, date_time):\n\n self._date_time = date_time",
"def creation_date(self, creation_date):\n\n self._creation_date = creation_date",
"def creation_date(self, creation_date):\n\n self._creation_date = creation_date",
"def create_user_and_login(self, agreed_to_terms_of_service, filled_out):\n with mute_signals(post_save):\n profile = ProfileFactory.create(\n agreed_to_terms_of_service=agreed_to_terms_of_service,\n filled_out=filled_out,\n )\n profile.user.social_auth.create(\n provider=EdxOrgOAuth2.name,\n uid=\"{}_edx\".format(profile.user.username)\n )\n self.client.force_login(profile.user)",
"def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_user(context, self.id, updates)\n self.obj_reset_changes()",
"def expiration_date(self, expiration_date):\n\n self._expiration_date = expiration_date",
"def start_date(self, start_date):\n \n self._start_date = start_date",
"def save_user(self):\n\n User.user_list.append(self)",
"def last_updated_user(self, last_updated_user):\n self._last_updated_user = last_updated_user",
"def join_sys_time(self, join_sys_time):\n self._join_sys_time = join_sys_time",
"def set_player_wins(self, player, wins):\n # Get the current time stamp\n time_stamp = time()\n\n # Is this a new winner?\n if player.unique_id not in self:\n\n # Add the new winner to the database\n self.cursor.execute(\n 'INSERT INTO gungame_winners (name, unique_id, wins, '\n 'time_stamp, last_win) VALUES(?, ?, ?, ?, ?)',\n (player.name, player.unique_id, 0, time_stamp, time_stamp)\n )\n\n # Get the winner's instance\n instance = self[player.unique_id]\n\n # Set the values for the instance\n instance.name = player.name\n instance.wins = wins\n instance.time_stamp = time_stamp\n instance.last_win = time_stamp\n\n # Update the winner's values in the database\n self.cursor.execute(\n 'UPDATE gungame_winners SET name=?, time_stamp=?, '\n 'wins=?, last_win=? WHERE unique_id=?', (\n player.name, instance.time_stamp, instance.wins,\n instance.last_win, player.unique_id,\n )\n )\n\n # Commit the changes to the database\n self.connection.commit()"
] |
[
"0.66438895",
"0.581493",
"0.56453663",
"0.51747286",
"0.5124787",
"0.5048352",
"0.49426377",
"0.49251464",
"0.49251464",
"0.49176228",
"0.49075496",
"0.49066377",
"0.49043387",
"0.48563272",
"0.48563272",
"0.48563272",
"0.48529363",
"0.48464775",
"0.48464775",
"0.48464775",
"0.48464775",
"0.48464775",
"0.47991368",
"0.47317973",
"0.47167116",
"0.47155413",
"0.47145188",
"0.4700385",
"0.4690831",
"0.46861255",
"0.46522337",
"0.46455684",
"0.4644215",
"0.46171054",
"0.46157703",
"0.45791227",
"0.4575436",
"0.4538994",
"0.45244205",
"0.45166683",
"0.45134628",
"0.45130926",
"0.45057285",
"0.45056537",
"0.45056537",
"0.45056537",
"0.45021817",
"0.4487002",
"0.44841385",
"0.44733942",
"0.44640872",
"0.4448107",
"0.44261387",
"0.44247612",
"0.4421771",
"0.441983",
"0.43965685",
"0.43805102",
"0.43677884",
"0.43677214",
"0.4354608",
"0.43365324",
"0.43329751",
"0.43193626",
"0.4310853",
"0.4286302",
"0.4277691",
"0.4274575",
"0.42721537",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42703304",
"0.42656702",
"0.4245984",
"0.42449844",
"0.42427424",
"0.42377126",
"0.42349315",
"0.42272204",
"0.42272204",
"0.42272204",
"0.42106447",
"0.4200876",
"0.4195689",
"0.41933358",
"0.41933358",
"0.41926792",
"0.41890275",
"0.41881827",
"0.41796708",
"0.41756117",
"0.41621172",
"0.4159064",
"0.41576675"
] |
0.7769368
|
0
|
Gets the delivery_email of this UserBase. The user's real email address. This field is present only if [email address visibility](/help/restrictvisibilityofemailaddresses) is limited and you are an administrator with access to real email addresses under the configured policy.
|
def delivery_email(self):
    return self._delivery_email
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')",
"def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email",
"def get_email(self):\n return self._email",
"def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]",
"def get_email(self):\n return self.email",
"def getEmail(self):\n return self.__email",
"def customer_email(self):\n return self._customer_email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")",
"def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")",
"def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''",
"def getEmail(self):\n return self.email",
"def delivery_identity(self) -> pulumi.Output[Optional['outputs.EventSubscriptionDeliveryIdentity']]:\n return pulumi.get(self, \"delivery_identity\")",
"def email(self, instance):\r\n return instance.user.email",
"def business_email(self):\n return self._business_email",
"def email_address(self) -> str:\n return self._email_address",
"def getEmail(self):\n\t\treturn self.Email",
"def email(self) -> str:\n return self._email",
"def _get_user_email_address(self, request):\n return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)",
"def delivery_email(self, delivery_email):\n\n self._delivery_email = delivery_email",
"def email(self):\n return self._dict.get('email')",
"def delivery(self):\n return self._delivery",
"def business_owner_email(self):\n return self._business_owner_email",
"def management_account_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_email\")",
"def log_useremail(self):\n return self.user.email",
"def user_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_email\")",
"def get_assignee_email(self, assignee_id):\n response = self.http_call(\"{0}/users/{1}.json\".format(self.uri, assignee_id))\n return json.loads(response.content.decode(sys.stdout.encoding, \"replace\"))[\"user\"][\"email\"]",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def get(self):\n user_id = request.args.get('user_id')\n return get_email(user_id)",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def account_email(self) -> str:\n return pulumi.get(self, \"account_email\")",
"def email(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"email\")",
"def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")",
"def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")",
"def Email(self, default=None):\n return self.data.get('email', default)",
"def get_email(khoros_object, user_settings=None, user_id=None, login=None, first_name=None, last_name=None,\n allow_multiple=False, display_warnings=True):\n user_settings = process_user_settings(user_settings, user_id=user_id, login=login,\n first_name=first_name, last_name=last_name)\n where_clause = _get_where_clause_for_email(user_settings)\n return _get_user_identifier(khoros_object, 'email', where_clause, allow_multiple, display_warnings)",
"def get_default_email(self):\n email_address = None\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE client_company_ID = %s ' \\\n u'AND communication_type = \"email\" ' \\\n u'AND main = 1'\n\n data = (self.id,)\n\n c, conn = connection(self.schema)\n\n try:\n c.execute(sql, data)\n\n address = c.fetchone()\n if address is not None:\n email_address = address[0]\n\n finally:\n conn_close(c, conn)\n\n return email_address",
"def get_email(self, id_):\n\n query = self._db.User.select(self._db.User.c.id_ == id_)\n query = query.with_only_columns([self._db.User.c.email, ])\n\n record = query.execute().fetchone()\n return record[0]",
"def get_email(obj):\r\n return obj.user.email",
"def delivery_address(self):\n registered_office = db.session.query(Office).filter(Office.business_id == self.id).\\\n filter(Office.office_type == 'registeredOffice').one_or_none()\n if registered_office:\n return registered_office.addresses.filter(Address.address_type == 'delivery')\n\n return db.session.query(Address).filter(Address.business_id == self.id).\\\n filter(Address.address_type == Address.DELIVERY)",
"def email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"email\")",
"def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")",
"def gcp_service_account_email(self) -> Optional[str]:\n return pulumi.get(self, \"gcp_service_account_email\")",
"def technical_owner_email(self):\n return self._technical_owner_email",
"def email(self):\n return self.__email",
"def mail(self):\n\n return self._mail",
"def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None",
"def mail(self):\n if \"mail\" in self._prop_dict:\n return self._prop_dict[\"mail\"]\n else:\n return None",
"def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")",
"def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")",
"def delivery(self, delivery):\n if self.local_vars_configuration.client_side_validation and delivery is None: # noqa: E501\n raise ValueError(\"Invalid value for `delivery`, must not be `None`\") # noqa: E501\n\n self._delivery = delivery",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"email\")",
"def service_account_email_address(self) -> str:\n return pulumi.get(self, \"service_account_email_address\")",
"def notification_sender_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notification_sender_email\")",
"def get_default_email(self):\n email = 'error@error.error'\n sql = u'SELECT detail ' \\\n u'FROM communication_TBL ' \\\n u'WHERE person_ID = %s ' \\\n u'AND main = 1 ' \\\n u'AND communication_type = \"email\"'\n data = (self.login_details['person_ID'])\n\n if verify_user_company_schema(self.login_details):\n c, conn = connection(self.login_details['company_schema'])\n\n try:\n c.execute(sql, data)\n value = c.fetchone()\n\n if value is not None:\n email = value[0]\n finally:\n conn_close(c, conn)\n return email",
"def get_receive_mail(self):\n return self.__mail",
"def get_user_email():\n if not is_authenticated() or not is_authenticated_CSC_user() or 'samlUserdata' not in session:\n return None\n\n csc_email = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('email', None), False)\n\n return csc_email[0] if csc_email else not_found('csc_email')\n return None",
"def customer_email(customer):\n return customer.get(\"email\")",
"def service_account_email_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_account_email_address\")",
"def service_account_email_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_account_email_address\")",
"def get_primary_email(self):\n return self.associated_emails.get(is_primary_email=True)",
"def delivery_properties(self) -> pulumi.Output[Optional[Sequence['outputs.EventSubscriptionDeliveryProperty']]]:\n return pulumi.get(self, \"delivery_properties\")",
"def clean_email(self):\n if getattr(self.instance, 'email', None):\n raise ValidationError(self.registered_error)\n return self.cleaned_data['email']",
"def get_email(cls, unused_provider_details):\r\n return None",
"def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)",
"def delivery(self) -> dict:\n delivery = self._delivery\n return delivery",
"def GetEmailAddress(user_id):\n user_id = user_id.strip()\n if '@' in user_id:\n email = user_id\n else:\n email = user_id + '@' + os.environ['AUTH_DOMAIN']\n\n if IsEmailValid(email):\n return email\n else:\n return None",
"def delivery_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventSubscriptionDeliveryPropertyArgs']]]]:\n return pulumi.get(self, \"delivery_properties\")",
"def delivery_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventSubscriptionDeliveryPropertyArgs']]]]:\n return pulumi.get(self, \"delivery_properties\")",
"def to_email_address(self):\n return self._to_recipients",
"def email(self, login_failures):\n return login_failures.user.email",
"def get_email_address(user_id: UserID) -> str:\n email_address = db.session \\\n .query(DbUser.email_address) \\\n .filter_by(id=user_id) \\\n .scalar()\n\n if email_address is None:\n raise ValueError(\n f\"Unknown user ID '{user_id}' or user has no email address\"\n )\n\n return email_address",
"def email(self):\n return sa.Column(sa.Unicode(100), nullable=False, unique=True)",
"def publisher_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher_email\")",
"def from_email_address(self):\n return self._from_email",
"def get_user(self):\n if not self.is_valid():\n return None\n # error checking done in: clean_email\n # NOTE: all emails are stored in lower-case\n e = self.clean_email().lower()\n return User.objects.get(email=e)",
"def clean_email(self):\n if self.data.get(\"selected_item\") != self.AGENT_ID:\n # resume normal invite flow\n return super().clean_email()\n\n email = self.cleaned_data[\"email\"]\n email = get_invitations_adapter().clean_email(email)\n try:\n self._agent_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n return super().clean_email()\n\n if self._agent_user.account_type != AccountType.agent_user.value:\n raise forms.ValidationError(\n _(\"An active non-agent user is using this e-mail address\")\n )\n if self._agent_user.organisations.filter(\n id=self.instance.organisation.id\n ).exists():\n raise forms.ValidationError(\n _(\"This agent is already active for this organisation\")\n )\n\n return email",
"def message_delivery(self) -> MessageDelivery:\n return self._message_delivery",
"def getEmail(self):\n return _libsbml.ModelCreator_getEmail(self)",
"def billing_contact_user(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"billing_contact_user\")",
"def mailing_address(self):\n if \"mailingAddress\" in self._prop_dict:\n if isinstance(self._prop_dict[\"mailingAddress\"], OneDriveObjectBase):\n return self._prop_dict[\"mailingAddress\"]\n else :\n self._prop_dict[\"mailingAddress\"] = PhysicalAddress(self._prop_dict[\"mailingAddress\"])\n return self._prop_dict[\"mailingAddress\"]\n\n return None",
"def email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"email\")",
"def generate_email_address(self):\n return \"%s.%s@%s\" % (uuid.uuid4(), self.mailbox, \"mailosaur.io\")",
"def email(self):\n return \"{}.{}@company.com\".format(self.first, self.last)",
"def publisher_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher_email\")",
"def get_referral_emails(self):\n profiles = self.profiles.filter(should_get_notifications=True)\n return [profile.user.email for profile in profiles]"
] |
[
"0.6896622",
"0.68197286",
"0.66776025",
"0.6648298",
"0.64765364",
"0.6463804",
"0.6460512",
"0.64135796",
"0.64135796",
"0.64135796",
"0.64135796",
"0.63343066",
"0.63343066",
"0.6321957",
"0.63091224",
"0.62989455",
"0.6274076",
"0.6220656",
"0.62095183",
"0.6136559",
"0.61223",
"0.6116247",
"0.6104205",
"0.60089004",
"0.5989312",
"0.5968295",
"0.5952045",
"0.5948589",
"0.59365845",
"0.5927442",
"0.59049696",
"0.59049696",
"0.59049696",
"0.58666545",
"0.5858562",
"0.5858562",
"0.5858562",
"0.5858562",
"0.5803407",
"0.5790251",
"0.5740511",
"0.56854206",
"0.5682459",
"0.5654714",
"0.5646524",
"0.5643421",
"0.56258327",
"0.560628",
"0.55828637",
"0.5567499",
"0.5567499",
"0.5550047",
"0.5489744",
"0.54714376",
"0.54701704",
"0.54701704",
"0.5458505",
"0.5458505",
"0.54001755",
"0.5399691",
"0.5399691",
"0.5399691",
"0.5399691",
"0.5399691",
"0.5399691",
"0.5399691",
"0.53920156",
"0.5367951",
"0.5356972",
"0.5344563",
"0.53216064",
"0.5320249",
"0.5287982",
"0.528367",
"0.5278664",
"0.5274332",
"0.52723294",
"0.5252269",
"0.5245033",
"0.52317035",
"0.5214704",
"0.5198122",
"0.5198122",
"0.5189705",
"0.518871",
"0.51830393",
"0.51589054",
"0.51550066",
"0.5152305",
"0.5143442",
"0.51229274",
"0.5117309",
"0.5101527",
"0.50970024",
"0.5067856",
"0.5041542",
"0.4994486",
"0.49940622",
"0.49773192",
"0.49705487"
] |
0.8150735
|
0
|
Sets the delivery_email of this UserBase. The user's real email address. This field is present only if [email address visibility](/help/restrictvisibilityofemailaddresses) is limited and you are an administrator with access to real email addresses under the configured policy.
|
def delivery_email(self, delivery_email):
    self._delivery_email = delivery_email
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def delivery_email(self):\n return self._delivery_email",
"def delivery(self, delivery):\n if self.local_vars_configuration.client_side_validation and delivery is None: # noqa: E501\n raise ValueError(\"Invalid value for `delivery`, must not be `None`\") # noqa: E501\n\n self._delivery = delivery",
"def setEmail(self, email):\n self.email = email\n return self",
"def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email",
"def email(self, email):\n if self.local_vars_configuration.client_side_validation and email is None: # noqa: E501\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n email is not None and len(email) > 64):\n raise ValueError(\"Invalid value for `email`, length must be less than or equal to `64`\") # noqa: E501\n\n self._email = email",
"def admin_email(self, admin_email):\n\n self._admin_email = admin_email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def email(self, email):\n\n self._email = email",
"def customer_email(self, customer_email):\n self._customer_email = customer_email",
"def email(self, email: str):\n\n self._email = email",
"def business_owner_email(self, business_owner_email):\n\n self._business_owner_email = business_owner_email",
"def recipient_email(self, recipient_email):\n\n self._recipient_email = recipient_email",
"def business_email(self, business_email):\n\n self._business_email = business_email",
"def setEmail(self, *args):\n return _libsbml.ModelCreator_setEmail(self, *args)",
"def message_delivery(self, delivery: MessageDelivery):\n self._message_delivery = delivery",
"def technical_owner_email(self, technical_owner_email):\n\n self._technical_owner_email = technical_owner_email",
"def contact_email(self, contact_email):\n\n self._contact_email = contact_email",
"def contact_email(self, contact_email):\n\n self._contact_email = contact_email",
"def email(self):\n billing_contact = self.owner.organization_user.user\n return billing_contact.email",
"def ga_at_delivery(self, ga_at_delivery):\n\n self.logger.debug(\"In 'ga_at_delivery' setter.\")\n\n self._ga_at_delivery = ga_at_delivery",
"def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")",
"def delivery_identity(self) -> Optional[pulumi.Input['EventSubscriptionDeliveryIdentityArgs']]:\n return pulumi.get(self, \"delivery_identity\")",
"def delivery(self, value: dict):\n self._delivery = value\n # Ensure the correct key is updated and object is set as dirty\n flag_modified(self, '_delivery')",
"def delivery_mode(self, delivery_mode):\n\n self._delivery_mode = delivery_mode",
"def get_email(self):\n return self._email",
"def email(self):\n # Look for a primary address\n useremail = UserEmail.query.filter_by(user_id=self.id, primary=True).first()\n if useremail:\n return useremail\n # No primary? Maybe there's one that's not set as primary?\n useremail = UserEmail.query.filter_by(user_id=self.id).first()\n if useremail:\n # XXX: Mark at primary. This may or may not be saved depending on\n # whether the request ended in a database commit.\n useremail.primary = True\n return useremail\n # This user has no email address. Return a blank string instead of None\n # to support the common use case, where the caller will use unicode(user.email)\n # to get the email address as a string.\n return u''",
"def email(self, instance):\r\n return instance.user.email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def email(self):\n return self._email",
"def getEmail(self):\n return self.__email",
"def get_email(self):\n return self.email",
"def email_address(self) -> str:\n return self._email_address",
"def customer_email(self):\n return self._customer_email",
"def client_email(self, client_email):\n\n self._client_email = client_email",
"def _set_user_email_address(self, request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n request.session[SESSION_VAR_EMAIL_ADDRESS] = form.cleaned_data['email']\n else:\n return form",
"def email(self) -> str:\n return self._email",
"def set_dispute_contact_email(self, email):\n if email == \"\":\n email = self.random_string_generator(8, string.ascii_lowercase) + \"@\" + self.random_string_generator(5, string.ascii_lowercase) + \".com\"\n self.set_value_into_input_field(self.dispute_contact_email_textbox_locator, email)",
"def get_user_email(self):\n member = self.get_user()\n if member:\n return member.getProperty('email')",
"def _set_campaign_email(self, campaign_email):\n if isinstance(campaign_email, str):\n campaign_email = TrackedCampaignEmail.objects.create(\n campaign=self.campaign, name=campaign_email\n )\n\n campaign_email.save()\n\n self.campaign_email = campaign_email",
"def deliveryStatus(self, deliveryStatus):\n\n self._deliveryStatus = deliveryStatus",
"def delivery_time(self, delivery_time):\n\n self._delivery_time = delivery_time",
"def getEmail(self):\n return self.email",
"def clean_email(self):\n if self.data.get(\"selected_item\") != self.AGENT_ID:\n # resume normal invite flow\n return super().clean_email()\n\n email = self.cleaned_data[\"email\"]\n email = get_invitations_adapter().clean_email(email)\n try:\n self._agent_user = User.objects.get(email__iexact=email)\n except User.DoesNotExist:\n return super().clean_email()\n\n if self._agent_user.account_type != AccountType.agent_user.value:\n raise forms.ValidationError(\n _(\"An active non-agent user is using this e-mail address\")\n )\n if self._agent_user.organisations.filter(\n id=self.instance.organisation.id\n ).exists():\n raise forms.ValidationError(\n _(\"This agent is already active for this organisation\")\n )\n\n return email",
"def email_address(self, email_address: \"str\"):\n self._attrs[\"emailAddress\"] = email_address",
"def test_activation_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, 'django@email.com')",
"def email(self, email):\n if email is None:\n raise ValueError(\"Invalid value for `email`, must not be `None`\") # noqa: E501\n\n self._email = email",
"def change_email(self, new_email):\n self.email = new_email\n print(f\"Email for {self.name} has been updated!\")\n return self.email",
"def delivery_identity(self) -> pulumi.Output[Optional['outputs.EventSubscriptionDeliveryIdentity']]:\n return pulumi.get(self, \"delivery_identity\")",
"def test_admin_approval_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, 'django@email.com')",
"def get_email(self):\n return self.reference[REF_EMAIL_ADDRESS][REF_VALUE]",
"def sent_by_email(self, sent_by_email):\n\n self._sent_by_email = sent_by_email",
"def test_admin_approval_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.activated = True\n self.registration_profile.objects.send_admin_approve_email(\n new_user, Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, 'registration@email.com')",
"def getEmail(self):\n\t\treturn self.Email",
"def test_activation_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_activation_email(Site.objects.get_current())\n self.assertEqual(mail.outbox[0].from_email, 'registration@email.com')",
"def delivery(self):\n return self._delivery",
"def _get_user_email_address(self, request):\n return request.session.get(SESSION_VAR_EMAIL_ADDRESS, not request.user.is_anonymous() and request.user.email)",
"def _update_attendee_by_email(email, marketing, gdpr, name=None):\n\n attendee = Attendee.objects.get(email=email)\n attendee.date_signed = datetime.date.today()\n attendee.marketing = marketing\n attendee.gdpr = gdpr\n if name:\n attendee.name = name\n attendee.save()\n\n return attendee",
"def business_email(self):\n return self._business_email",
"def set_email_notification(self, hit_type, email, event_types=None):\r\n return self._set_notification(hit_type, 'Email', email, event_types)",
"def Email(self, default=None):\n return self.data.get('email', default)",
"def sender_email_notifications(self, sender_email_notifications):\n\n self._sender_email_notifications = sender_email_notifications",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")",
"def from_email_address(self, val: EmailAddress):\n self._from_email = val",
"def from_email(self, from_email):\n\n self._from_email = from_email",
"def email(self):\n return sa.Column(sa.Unicode(100), nullable=False, unique=True)",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email(self) -> str:\n return pulumi.get(self, \"email\")",
"def email_address(self) -> \"str\":\n return self._attrs.get(\"emailAddress\")",
"def business_owner_email(self):\n return self._business_owner_email",
"def set_receive_mail(self):\n self.__mail = True",
"def change_email(self, email):\n self.active = False\n self.other_email = email\n self.key = EmailManager.generate_key()\n self.save()\n\n send_change_email(self, email)\n return self.key",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def email_address(self, email_address):\n\n self._email_address = email_address",
"def management_account_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"management_account_email\")",
"def SetCurrentUser(self, email, user_id='123456', is_admin=False):\n email = email or ''\n user_id = user_id or ''\n is_admin = '1' if is_admin else '0'\n self.testbed.setup_env(user_is_admin=is_admin,\n user_email=email,\n user_id=user_id,\n overwrite=True)",
"def test_admin_approval_complete_email_uses_registration_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, 'registration@email.com')",
"def test_admin_approval_complete_email_falls_back_to_django_default_from_email(self):\n new_user = UserModel().objects.create_user(**self.user_info)\n profile = self.registration_profile.objects.create_profile(new_user)\n profile.send_admin_approve_complete_email(Site.objects.get_current())\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, 'django@email.com')",
"def email(self):\n return self.__email",
"def send_sender_activation_email(self, email):\n logger.info(\"Function call: send_sender_activation_email for '{}'\".format(email, ))\n return self.__handle_error('Empty sender email') if not email else self.__handle_result(self.__send_request('senders/{}/code'.format(email, )))",
"def primary_email(self, primary_email):\n\n self._primary_email = primary_email",
"def email(self):\n return '{}.{}@email.com'.format(self.fname,self.lname)",
"def send_activation_email(self, user):\n activation_key = self.get_activation_key(user)\n context = self.get_email_context(activation_key)\n context[\"user\"] = user\n subject = render_to_string(\n template_name=self.email_subject_template,\n context=context,\n request=self.request,\n )\n # Force subject to a single line to avoid header-injection\n # issues.\n subject = \"\".join(subject.splitlines())\n message = render_to_string(\n template_name=self.email_body_template,\n context=context,\n request=self.request,\n )\n user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)",
"def email(self):\n return self._dict.get('email')",
"def primary_contact_email(self, primary_contact_email):\n\n self._primary_contact_email = primary_contact_email",
"def validate_email(self, value):\n if not value:\n raise serializers.ValidationError(\"Email cannot be null\")\n return value"
] |
[
"0.69637316",
"0.6370568",
"0.59695375",
"0.59579545",
"0.58683085",
"0.5863861",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.58421206",
"0.582248",
"0.5751209",
"0.5748383",
"0.5698401",
"0.5613598",
"0.5521131",
"0.55090696",
"0.5491955",
"0.5447653",
"0.5447653",
"0.5443579",
"0.5429883",
"0.5398202",
"0.5398202",
"0.53678536",
"0.53605616",
"0.53353554",
"0.530964",
"0.52824575",
"0.52818877",
"0.52818877",
"0.52818877",
"0.52818877",
"0.528178",
"0.5275431",
"0.525476",
"0.52341497",
"0.5229668",
"0.520948",
"0.5202462",
"0.5198085",
"0.5190571",
"0.51848173",
"0.51504153",
"0.5140103",
"0.5132982",
"0.5123741",
"0.5123203",
"0.51162493",
"0.5112468",
"0.51095045",
"0.50983435",
"0.5081794",
"0.508037",
"0.5072279",
"0.50681704",
"0.5039898",
"0.5034028",
"0.5019678",
"0.49653253",
"0.49510583",
"0.49442506",
"0.49198705",
"0.49077097",
"0.48958743",
"0.48842904",
"0.48842904",
"0.48842904",
"0.48842904",
"0.488318",
"0.4875694",
"0.48722166",
"0.48617586",
"0.48617586",
"0.48617586",
"0.4849777",
"0.4844498",
"0.48332936",
"0.48300457",
"0.48236293",
"0.48236293",
"0.48236293",
"0.48215088",
"0.47882766",
"0.47838417",
"0.47808248",
"0.4775516",
"0.47547698",
"0.47485894",
"0.47480068",
"0.47401562",
"0.47342646",
"0.47254407",
"0.4716613"
] |
0.812475
|
0
|
Gets the profile_data of this UserBase. A dictionary containing custom profile field data for the user. Each entry maps the integer ID of a custom profile field in the organization to a dictionary containing the user's data for that field. Generally the data includes just a single `value` key; for those custom profile fields supporting Markdown, a `rendered_value` key will also be present.
|
def profile_data(self):
return self._profile_data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def user_profile_data(id):\n user = User.query.get(id)\n return user.to_dict_profile()",
"def get_object_data(self, **kwargs):\n user = self.request.user\n return UserProfile.objects.get(user=user)",
"def profile(self) -> dict:\n endpoint = \"/api/users/profile/\"\n ret = self._request(endpoint=endpoint)\n return ret",
"def get_user_data(self):\n return self.user_data",
"def get_user_data(self):\n return self.user_data",
"def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile",
"def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile",
"def get_user_profile(self):\n return self.user.profile",
"def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }",
"def _parse_profile_data (self, netflix_page_data):\n profiles = {};\n important_fields = [\n 'profileName',\n 'isActive',\n 'isAccountOwner',\n 'isKids'\n ]\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for profile_id in netflix_page_data.get('profiles'):\n if self._is_size_key(key=profile_id) == False and type(netflix_page_data['profiles'][profile_id]) == dict and netflix_page_data['profiles'][profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n if important_field == 'profileName':\n profile.update({important_field: HTMLParser.HTMLParser().unescape(netflix_page_data['profiles'][profile_id]['summary'][important_field]).encode('utf8')})\n else:\n profile.update({important_field: netflix_page_data['profiles'][profile_id]['summary'][important_field]})\n avatar_base = netflix_page_data['nf'].get(netflix_page_data['profiles'][profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar, 'isFirstUse': False})\n profiles.update({profile_id: profile})\n return profiles\n\n # values are stored in lists (returned from JS parser)\n # TODO: get rid of this christmas tree of doom\n for item in netflix_page_data:\n if 'hasViewedRatingWelcomeModal' in dict(item).keys():\n for profile_id in item:\n if self._is_size_key(key=profile_id) == False and type(item[profile_id]) == dict and item[profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n profile.update({important_field: item[profile_id]['summary'][important_field]})\n avatar_base = item['nf'].get(item[profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar})\n profiles.update({profile_id: profile})\n return profiles",
"def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data",
"def user_profile():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user profile\")\n user_profile = get_user_profile(access_token)\n user_profile['access_token'] = access_token\n return json.dumps(user_profile)",
"def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None",
"def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile",
"def get_profile():\n\n if request['user_id']:\n\n user = User.select().where(User.id == request['user_id']).get()\n uSchema = UserSchema()\n jsonUser = uSchema.dumps(user)\n\n del request['user_id']\n return jsonUser.data\n\n return",
"def parse(self):\n details = self.details()\n\n return Profile(\n book_id=self.book_id,\n title=self.title(),\n user_id=self.user_id(),\n username=self.username(),\n summary=self.summary(),\n published=self.published(),\n updated=self.updated(),\n **details\n )",
"def get_user_info(self):\n user_info = self.data_source.get_user_info(self.user_id)\n\n return user_info",
"def get_profile(self,fields=('id','first-name','last-name','headline','summary')):\n\n if not self._access_token:\n raise FBError(\"Authentication needed!\")\n \n token = oauth.Token(self._access_token['oauth_token'], self._access_token['oauth_token_secret'])\n client = oauth.Client(self.consumer, token)\n profile_url = self.profile_url % \",\".join(fields)\n resp, content = client.request(profile_url,headers={\"x-li-format\":'json'})\n \n if resp['status'] != '200':\n print resp\n raise FBError(\"Invalid response %s.\" % resp['status'])\n \n try:\n return json.loads(content)\n except Exception, e:\n raise FBError(\"Invalid json %s.\" % unicode(e))",
"def user_data(self):\n itemuser = self.data['user']\n my_user_dict = {'user_id': itemuser['id'], 'user_name': itemuser['name'],\n 'user_handle': itemuser['screen_name'], 'user_desc': itemuser['description'],\n 'twitter_birthday': itemuser['created_at'], 'user_location': itemuser['location'],\n 'followers': itemuser['followers_count'], 'favorites': itemuser['favourites_count'],\n 'statuses': itemuser['statuses_count']}\n return my_user_dict",
"def getProfileInfo(self):\n doc = minidom.parse(urllib.urlopen(serverString + \"/rest/user/\" + self.name))\n for element in doc.getElementsByTagName(\"user\")[0].childNodes:\n if element.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif element.tagName == \"status\" and int(element.firstChild.data) != 1:\n raise ServerError(element.firstChild.data)\n elif element.tagName == \"input\":\n self.name = element.firstChild.data\n elif element.tagName == \"id\":\n self.id = element.firstChild.data\n elif element.tagName == \"image\":\n self.image = element.firstChild.data\n elif element.tagName == \"tagline\":\n if element.firstChild == None:\n self.tagline = None\n else:\n self.tagline = element.firstChild.data\n elif element.tagName == \"creation\":\n self.created = datetime.datetime.strptime(element.firstChild.data[:element.firstChild.data.rfind(\".\")]+\".GMT\", \"%Y-%m-%d %H:%M:%S.%Z\")",
"def get_meta(self) -> Meta:\n return Meta(\n object_type=\"profile\",\n extra_custom_props=[\n (\"property\", \"profile.username\", self.user.username),\n (\"property\", \"profile.first_name\", self.user.first_name),\n (\"property\", \"profile.last_name\", self.user.last_name),\n ]\n if self.user\n else [],\n title=self.display_name or self.name,\n image=self.image.large,\n )",
"def parse_user_fields(json_data):\n # Populate the fields\n user_info = {}\n for db_field, json_field in Users.UserJSON.fields.items():\n try:\n user_info[db_field] = get_json_field(json_data, json_field)\n if db_field == 'user_address_street':\n user_info[db_field] = user_info.get(db_field).replace('\\n', '')\n elif (db_field == 'user_first_login') or (db_field == 'user_last_login'):\n raw_timestamp = user_info.get(db_field)[:19]\n user_info[db_field] = core_utils.validate_timestamp(raw_timestamp)\n elif db_field == 'user_tags':\n user_info[db_field] = ', '.join(user_info.get(db_field))\n elif db_field == 'user_profile':\n profile = user_info[db_field]\n for idx in range(len(profile)):\n if profile[idx]['jive_label'] in Users.UserJSON.profile_fields:\n profile_field_name = Users.UserJSON.profile_fields.get(profile[idx]['jive_label'])\n user_info[profile_field_name] = profile[idx]['value']\n del user_info['user_profile']\n except (KeyError, IndexError, AttributeError):\n # Continue on to the next field\n continue\n # Return the user information\n return user_info",
"def GetUserData(self):\r\n\r\n return self.user_data",
"def user_data(self, access_token, *args, **kwargs):\n params = self.setting(\"PROFILE_EXTRA_PARAMS\", {})\n response = kwargs.get('response') or {}\n params[\"access_token\"] = access_token\n headers = {\n \"Authorization\": \"%s %s\" % (\n response.get(\"token_type\", \"Bearer\").capitalize(),\n access_token),\n \"Accept\": 'application/json',\n \"Content-type\": 'application/json;charset=utf-8'}\n return self.get_json(self.USER_DATA_URL,\n params=params, headers=headers)",
"def get_login_user_profile(uid):\n # fetch the user info from db,\n # just in case the info has been updated somewhere\n json_user = User.find(uid).to_dict_with_mobile()\n json_user['work_experiences'] = work_service.get_work_experiences(uid)\n return json_user",
"def user_data(self):\n return self._user_data",
"def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo",
"def fetch_my_profile(self, api_token: str) -> dict:\n query = \"\"\"\n query myProfile {\n myProfile {\n id\n firstName\n lastName\n }\n }\n \"\"\"\n path = jmespath.compile(\n \"\"\"\n data.myProfile.{\n id: id\n first_name: firstName\n last_name: lastName\n }\n \"\"\"\n )\n\n data = self.do_query(query, api_token=api_token)\n\n parsed_data = path.search(data)\n self.contains_keys(parsed_data, [\"id\", \"first_name\", \"last_name\"])\n return parsed_data",
"def get_profile_info(self):\n\n drill_path = str(Path.home())+\"/Documents/ball_e_profiles/drill_profiles/{drill_name}/{drill_name}.csv\".format(\n drill_name=self.drill_name)\n with open(drill_path) as file:\n csv_reader = csv.reader(file, delimiter=',')\n row_count = 0\n info_dict = dict()\n for row in csv_reader:\n if row_count == 0:\n row_count += 1\n else:\n info_dict[row[0]] = [row[1], row[2], row[3]]\n row_count += 1\n\n return info_dict",
"def json(self):\n\n this_user_detail = dict(\n arn=self.arn,\n create_date=self.create_date,\n id=self.user_id,\n inline_policies=self.inline_policies_json,\n inline_policies_count=len(self.inline_policies_json),\n # groups=self.groups,\n groups=self.groups_json,\n path=self.path,\n managed_policies_count=len(self.attached_managed_policies),\n managed_policies=self.attached_managed_policies_pointer_json,\n risks=self.consolidated_risks\n )\n return this_user_detail",
"def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct",
"def get_full_profile(self) -> Profile:\n return Profile(**{**self.profile, **self.contact})",
"def get_user_profile(self):\n return self.request('get', 'id/users')",
"def create(self, validated_data):\r\n user_data = validated_data.pop('user')\r\n user = UserSerializer.create(UserSerializer(), validated_data = user_data)\r\n profile, created = Profile.objects.update_or_create(user = user,\r\n bio = validated_data.pop('bio'),\r\n location = validated_data.pop('location'),\r\n birth_date = validated_data.pop('birth_date'))\r\n return profile",
"def getdat(user):\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]",
"def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }",
"def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n profile, created = Profile.objects.update_or_create(\n user=user,\n avatar=validated_data.pop('avatar'),\n biography=validated_data.pop('biography'),\n link=validated_data.pop('link') \n )\n return profile",
"def extract_common_fields(self, data):\n member = data.get('member', {})\n return {'username': member.get('name'), 'email': member.get('email')}",
"def user_profile(token, u_id):\n # pylint: disable=unused-argument\n # NB: Supressed this warning because token is in fact used in\n # the decorator, however pylint doesn't check for this.\n user = database.get_user_data(u_id)\n return {\"user\": user}",
"def profile_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"profile_properties\")",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties",
"def get_user_profile(uid, viewer_uid):\n user = User.find(uid)\n if not user:\n st_raise_error(ErrorCode.USER_NOT_EXIST)\n work_experiences = work_service.get_work_experiences(uid)\n endorsement = get_user_endorsement(uid, current_user.user.id)\n latest_comment = EndorseComment.find_latest_by_uid(uid)\n profile = user.to_dict()\n profile['endorsement'] = endorsement\n profile['work_experiences'] = work_experiences\n if latest_comment:\n profile['latest_comment'] = latest_comment.to_dict()\n contact = Contact.find_by_uid(uid, viewer_uid)\n profile['is_contact'] = contact is not None \\\n and contact.status == ContactStatus.Connected\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def _username_to_profile(self, username: str) -> Dict[str, Any]:\n\n base_url = self.keys.pre_profile + username + self.keys.rank_token + self.keys.post_profile\n\n # Build the page source url for the given user's account\n con = urllib.request.urlopen(base_url)\n user_profile = con.read().decode('utf-8')\n\n # Convert the webpage to a profile JSON\n profile: dict = json.loads(str(user_profile))\n return profile",
"def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))",
"def user_data(self, access_token, *args, **kwargs):\n response = self.request(\n \"https://openapi.naver.com/v1/nid/me\",\n headers={\n \"Authorization\": f\"Bearer {access_token}\",\n \"Content_Type\": \"text/json\",\n },\n )\n\n data = response.json()\n\n return {\n \"id\": self._fetch(data, \"id\"),\n \"email\": self._fetch(data, \"email\"),\n \"username\": self._fetch(data, \"name\"),\n \"nickname\": self._fetch(data, \"nickname\"),\n \"gender\": self._fetch(data, \"gender\"),\n \"age\": self._fetch(data, \"age\"),\n \"birthday\": self._fetch(data, \"birthday\"),\n \"profile_image\": self._fetch(data, \"profile_image\"),\n }",
"def get_user_member(self, user):\n profile = user.get(\"profile\", {})\n name = profile.get(\"full_name\") or user.get(\"username\") or _(\"Untitled\")\n description = profile.get(\"affiliations\") or \"\"\n fake_user_obj = SimpleNamespace(id=user[\"id\"])\n current_identity = self.context[\"identity\"]\n avatar = current_users_service.links_item_tpl.expand(\n current_identity, fake_user_obj\n )[\"avatar\"]\n\n return {\n \"type\": \"user\",\n \"id\": user[\"id\"],\n \"name\": name,\n \"description\": description,\n \"avatar\": avatar,\n }",
"def get_profile():\n logger.debug(\"entering function get_profile\")\n response = read_user_profile()\n logger.debug(\"exiting function get_profile\")\n return jsonify(response)",
"def details(self):\n details = ProfileDetailsParser(self.details_string())\n\n return dict(\n follows=details.follows(),\n favorites=details.favorites(),\n rating=details.rating(),\n language=details.language(),\n genres=details.genres(),\n characters=details.characters(),\n )",
"def read_user_profile():\n logger.debug(\"entering function read_profile\")\n find_query = {\"user_id\": current_user.id}\n project_query = {\"_id\": 0, \"user_id\": 0, \"password\": 0}\n result = run_find_one_query(config.USERS_COL, find_query, project_query, error=True,\n error_msg=NO_USER_ERR_MSG)\n logger.info(\"fetched user profile for %s\", current_user.id)\n response = get_success_response(data=result)\n logger.debug(\"exiting function read_profile\")\n return response",
"def create(self, validated_data):\n request = self.context.get('request')\n profile = Profile(**validated_data)\n profile.user = request.user\n profile.save()\n return profile",
"def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)",
"def extract_user_gql(data):\n return {\n \"pk\": int(data[\"id\"]),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data[\"is_private\"],\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n \"media_count\": data[\"edge_owner_to_timeline_media\"][\"count\"],\n \"follower_count\": data[\"edge_followed_by\"][\"count\"],\n \"following_count\": data[\"edge_follow\"][\"count\"],\n \"biography\": data[\"biography\"],\n \"external_url\": data[\"external_url\"],\n \"is_business\": data[\"is_business_account\"],\n }",
"def fb_profile(self):\n return FBProfile.objects.get(fb_id=self.fb_id)",
"def _get_user_data(self):\n return {\"key\": self._key}",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)",
"def get_profile(request):\n collected_values = {}\n\n # Only allow GET requests on this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract params\n uid = request.GET['uid']\n key = request.GET['key']\n\n # Hardcoded key for security\n if key != SUPER_SECURE_STRING:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Key\"\n return JsonResponse(collected_values, status=400)\n\n # Grab the user's profile information\n users = LUser.objects.filter(user_id=uid)\n user = users[0]\n\n # Collect values\n collected_values[\"user_info\"] = user.get_map()\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Profile Result: %s\", user)\n return JsonResponse(collected_values, status=200)",
"def profile_user_json(user_id: int):\n current_user = User.query.filter_by(id=user_id).first_or_404()\n return jsonify(get_user_data(current_data))",
"def get_student_profile_data():\n # pylint: disable=no-member\n try:\n regexp_string = self.regexp_from_users_included_email(self.users_included_email)\n re.compile(regexp_string)\n users = self.students_for_course(regexp_string)\n except:\n log.info(\"regexp is invalid: '%s'\", regexp_string)\n users = []\n\n for user in users:\n student_id = anonymous_id_for_user(user, self.course_id)\n profile = user.profile\n\n vip = self.get_vip(user)\n image_url = None\n if vip:\n image_url = \"https://my.imd.org/api/profile/{}/profile-picture-header\".format(vip)\n else:\n if self.is_course_staff:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header-no-vip.gif')\n else:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header.gif')\n\n cohort_name = None\n if (self.is_course_cohorted(self.course_id)):\n cohort_name = self.get_cohort(user, self.course_id).name\n\n yield {\n 'student_id': student_id,\n 'username': user.username,\n 'fullname': profile.name,\n 'vip': vip,\n 'image_url': image_url,\n 'email': user.email,\n 'cohort_name': cohort_name,\n }",
"def build_profile(first, last, **user_info):\r\n # Build a dict with the required keys.\r\n profile = {'first': first, 'last': last}\r\n # Add any other keys and values.\r\n for key, value in user_info.items():\r\n profile[key] = value\r\n return profile",
"def get_profile(user):\n if user.is_authenticated():\n # Return the PootleProfile associated with authenticated users\n return user.get_profile()\n else:\n # Anonymous users get the PootleProfile associated with the 'nobody' user\n return User.objects.get(username='nobody').get_profile()",
"def user_profile(first, last, **add_info):\n profile = {}\n profile['firstname'] = first\n profile['lastname'] = last\n\n for key, value in add_info.items():\n profile[key] = value\n \n return profile",
"def getData(self):\r\n return personData(\r\n self.title.getVal(),\r\n self.first.getVal(),\r\n self.middle.getVal(),\r\n self.last.getVal(),\r\n self.suffix.getVal(),\r\n self.phone.getVal(),\r\n self.ext.getVal(),\r\n self.email.getVal(),\r\n self.affiliation.getVal())",
"def get_user_info_by_id(self, user_id: int) -> dict:",
"def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}",
"def fetch_user_data(self, user_id):\n\n log.info('Fetching user data from Twitter for ID %s' % user_id)\n user = self.api.get_user(user_id)\n props = user.__dict__ # user properties\n\n del props['_api'], props['status'] # no embedded objects\n\n props['accessed'] = datetime.datetime.now()\n props['detail'] = 'full'\n props['type'] = 'user'\n\n return props",
"def get_user_info(self):\n\n if self._access_token is None:\n raise RequiresAccessTokenError()\n response = self.__make_oauth_request(USER_INFO_URL, token=self._access_token, signed=True)\n return simplejson.loads(response.read())",
"def get_user_details(self, response):\n fullname, first_name, last_name = self.get_user_names(\n response.get(\"fullName\"),\n response.get(\"firstName\"),\n response.get(\"lastName\"),\n )\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\") or \"\",\n \"fullname\": fullname,\n \"first_name\": first_name,\n \"last_name\": last_name,\n }",
"def get_profile(self):\n #for perm in _user_get_all_permissions(self,obj=None):\n\n if not hasattr(self, '_profile_cache'):\n from django.conf import settings\n if not getattr(settings, 'LOGIN_PROFILE_MODULE', False):\n raise SiteProfileNotAvailable(\n 'You need to set LOGIN_PROFILE_MODULE in your project '\n 'settings')\n try:\n app_label, model_name = settings.LOGIN_PROFILE_MODULE.split('.')\n except ValueError:\n raise SiteProfileNotAvailable(\n 'app_label and model_name should be separated by a dot in '\n 'the LOGIN_PROFILE_MODULE setting')\n try:\n model = models.get_model(app_label, model_name)\n if model is None:\n raise SiteProfileNotAvailable(\n 'Unable to load the profile model, check '\n 'LOGIN_PROFILE_MODULE in your project settings')\n self._profile_cache = model._default_manager.using(\n self._state.db).get(id__exact=self.id)\n self._profile_cache.user = self\n except (ImportError, ImproperlyConfigured):\n raise SiteProfileNotAvailable\n return self._profile_cache",
"def get_profile_data(auth, db):\n\n id_team, user, team, money, color_prim, color_sec = analyze_init(auth, db)\n id_user, seats, fans, ranking, streak = analyze_team_page(auth, db, id_team)\n\n \n v_profile = profile.Profile(\n id_user, user, id_team, team, money, color_prim, \n color_sec, seats, fans, ranking, streak\n )\n\n if (db.profile.find_one({\"id\": int(id_team)}) is not None):\n db.profile.replace_one(\n {\"id\": int(id_team)}, v_profile.to_db_collection())\n else:\n db.profile.insert_one(v_profile.to_db_collection())\n\n print(show(\"profile\") + \" > Perfil actualizado\")\n\n return id_team",
"async def create_profile_for_user(self, *, profile_create: ProfileCreate) -> ProfileInDB:\n created_profile = await self.db.fetch_one(query=CREATE_PROFILE_FOR_USER_QUERY, values=profile_create.dict())\n return ProfileInDB(**created_profile)",
"def get_user_details(self, response):\n values = {\n 'username': unquote(response['nick']),\n 'email': unquote(response['email']),\n 'first_name': unquote(response['first_name']),\n 'last_name': unquote(response['last_name'])\n }\n\n if values['first_name'] and values['last_name']:\n values['fullname'] = '%s %s' % (values['first_name'],\n values['last_name'])\n return values",
"def profile_details(self, profile_name):\n url = get_url('profile details', profile=profile_name)\n response = self._get(url)\n if response.status_code == 404:\n return None\n raise_on_error(response)\n return Profile(response.json())",
"def profile(self, user, **kwargs):\n # pylint: disable=no-member\n return self._get(API.USER.value.format(user_id=user), **kwargs)",
"def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)",
"def get_user_details(self, response):\n\n return {\n 'email': response.get('email'),\n 'id': response.get('id'),\n 'full_name': response.get('name')\n }",
"def extract_user_short(data):\n user_pk = data.get(\"id\", data.get(\"pk\"))\n assert user_pk, 'User without pk \"%s\"' % data\n return {\n \"pk\": int(user_pk),\n \"username\": data[\"username\"],\n \"full_name\": data[\"full_name\"],\n \"is_private\": data.get(\"is_private\"),\n \"profile_pic_url\": data[\"profile_pic_url\"],\n \"is_verified\": data.get(\"is_verified\"),\n # \"is_unpublished\": data.get(\"is_unpublished\"),\n }",
"def getProfile(self, text):\n\n if self.url is None:\n raise Exception(\"No Personality Insights service is bound to this app\")\n response = requests.post(self.url + \"/v2/profile\",\n auth=(self.username, self.password),\n headers = {\"content-type\": \"text/plain\"},\n data=text\n )\n try:\n return json.loads(response.text)\n except:\n raise Exception(\"Error processing the request, HTTP: %d\" % response.status_code)",
"def get_facebook_user_info(access_token):\n required_data_list = []\n for per in settings.FACEBOOK_EXTENDED_PERMISSIONS:\n required_data_list.append(per.replace(\"user_\",\"\"))\n \n required_data_list.append(\"picture.type(large)\")\n required_data = (\", \").join([data for data in required_data_list])\n \n graph_url = \"https://graph.facebook.com/me?access_token=%s&fields=%s\" % (access_token,required_data)\n public_info_url = \"https://graph.facebook.com/me?access_token=%s\" % access_token\n \n profile = json.load(urllib.urlopen(graph_url))\n profile_info = json.load(urllib.urlopen(public_info_url))\n \n profile_response_dict = {}\n profile_response_dict.update(profile)\n profile_response_dict.update(profile_info)\n profile_response_json = json.dumps(profile_response_dict)\n\n return (profile_response_json, profile_response_dict)",
"def members(self):\n data = UserProfile.objects.filter(\n organization_id=self.id\n ).order_by(\n 'display_name', 'first_name', 'last_name'\n )\n\n return data",
"def _get_data(self):\n data = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # skip for factories for now\n continue\n value = getattr(self, name)\n raw_value = field.to_raw(value)\n if isinstance(field, fields.Secret):\n data[f\"__{name}\"] = raw_value\n else:\n data[name] = raw_value\n\n return data",
"def get_user_data(access_code):\n fields = 'id,first_name,gender,email,picture,age_range,last_name,birthday,favorite_athletes,favorite_teams,location,relationship_status,languages,link,cover,friends'\n payload = {'access_token': access_code, 'fields': fields}\n user_data = requests.get('https://graph.facebook.com/v2.7/me?', params=payload)\n return user_data.json()",
"def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }",
"def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }",
"def user_info(self):\n resp = self._get(get_url('user'))\n raise_on_error(resp)\n ret = resp.json()\n return UserInfo(ret)",
"def get_user_details(self, response):\n # Build the username with the team $username@$team_url\n # Necessary to get unique names for all of slack\n username = response.get('user')\n if self.setting('USERNAME_WITH_TEAM', True):\n match = re.search(r'//([^.]+)\\.slack\\.com', response['url'])\n username = '{0}@{1}'.format(username, match.group(1))\n\n out = {'username': username}\n if 'profile' in response:\n out.update({\n 'email': response['profile'].get('email'),\n 'fullname': response['profile'].get('real_name'),\n 'first_name': response['profile'].get('first_name'),\n 'last_name': response['profile'].get('last_name'),\n 'team_name': response.get('team_name')\n })\n return out",
"def populate_og_data(self, user):\n title = user.username if not user.profile.name else user.profile.name\n\n image_field = None\n # For image use reel thumbnail\n if user.profile.reel_thumbnail_16_9:\n image_field = user.profile.reel_thumbnail_16_9\n # Otherwise use the avatar\n elif user.profile.avatar:\n image_field = user.profile.avatar\n # Otherwise use the thumbnails of the latest posts\n elif user.post_set.filter(status='published'):\n latest_post = user.post_set.filter(status='published').last()\n if latest_post.thumbnail:\n image_field = latest_post.thumbnail\n\n image_alt = f\"{title} on anima.to\"\n\n return OgData(\n title=title,\n description=user.profile.bio,\n image_field=image_field,\n image_alt=image_alt,\n )",
"def get_context_data(self, **kwargs):\r\n context = super().get_context_data(**kwargs)\r\n context['user'] = self.request.user\r\n context['profile'] = self.request.user.profile\r\n return context",
"def get_profile_id(self):\n # get list of user profile\n results = self.service.userProfiles().list().execute()\n # TODO: create custom exceptions\n assert 'items' in results\n # TODO: create custom exceptions\n assert len(results['items'])\n # store profileId\n self.profileId = results['items'][0]['profileId']\n\n return self.profileId",
"def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def build_profile(first, last, **user_info):\n\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def _get_custom_data(self):\n if self.uid in CUSTOM_THEME_DATA:\n return CUSTOM_THEME_DATA[self.uid]\n return {}",
"def extra_data(self, user, uid, response, details):\n try:\n return self.get_steam_profile(response)\n except:\n return \"\"",
"def get_user_profile(self):\n\t\treturn Job(SDK.PrlSrv_GetUserProfile(self.handle)[0])"
] |
[
"0.69240314",
"0.6456976",
"0.64322084",
"0.6248396",
"0.6248396",
"0.62341315",
"0.622801",
"0.6204653",
"0.6138357",
"0.611971",
"0.6103204",
"0.6066835",
"0.6032334",
"0.60309625",
"0.5982074",
"0.5944301",
"0.59439224",
"0.5938915",
"0.5907476",
"0.58976126",
"0.5869983",
"0.58552057",
"0.5839788",
"0.5809002",
"0.5780876",
"0.57699656",
"0.5752503",
"0.5743313",
"0.5703813",
"0.57016736",
"0.5683671",
"0.568248",
"0.56803495",
"0.5679427",
"0.5678088",
"0.5675138",
"0.56469005",
"0.56374544",
"0.5631148",
"0.56204623",
"0.5617109",
"0.5617109",
"0.559944",
"0.55915797",
"0.5589263",
"0.5589263",
"0.5589263",
"0.55787665",
"0.5566201",
"0.55652994",
"0.5555451",
"0.5554401",
"0.5548507",
"0.5547631",
"0.55377525",
"0.5532848",
"0.5530463",
"0.5529621",
"0.5520395",
"0.551589",
"0.551589",
"0.55016744",
"0.54856724",
"0.54853874",
"0.5484679",
"0.5476807",
"0.5468934",
"0.54533106",
"0.5448797",
"0.54265386",
"0.54236525",
"0.5415826",
"0.5399734",
"0.5399113",
"0.5384635",
"0.5382359",
"0.53777367",
"0.53723085",
"0.5371786",
"0.53701",
"0.53602815",
"0.5345759",
"0.53346175",
"0.5324284",
"0.53104526",
"0.5310334",
"0.53071696",
"0.5297807",
"0.5297807",
"0.52966243",
"0.52937037",
"0.52853435",
"0.52793896",
"0.52767396",
"0.52751786",
"0.52751786",
"0.52711964",
"0.52548707",
"0.5253949",
"0.52491206"
] |
0.6958784
|
0
|
Sets the profile_data of this UserBase. A dictionary containing custom profile field data for the user. Each entry maps the integer ID of a custom profile field in the organization to a dictionary containing the user's data for that field. Generally the data includes just a single `value` key; for those custom profile fields supporting Markdown, a `rendered_value` key will also be present.
|
def profile_data(self, profile_data):
self._profile_data = profile_data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_user_data(self, user_data, user_info):\n self.user_info = AttrDict(user_info)\n self.user_data = AttrDict(user_data)\n self.score_user = self.scoresaber.get_user_data(self.user_data.id)",
"def update_user_profile(req_data):\n logger.debug(\"entering function update_user_profile\")\n\n update_fields = {}\n for field in req_data:\n update_fields[field] = req_data[field]\n if \"password\" in req_data:\n update_fields[\"password\"] = generate_password_hash(req_data[\"password\"])\n\n find_query = {\"user_id\": current_user.id}\n update_query = {\"$set\": update_fields}\n run_update_one_query(config.USERS_COL, find_query, update_query,\n error=True, error_msg=PROFILE_UPDATE_FAILED_ERR_MSG)\n logger.info(\"Profile update success for %s\", current_user.id)\n\n logger.debug(\"exiting function update_user_profile\")\n return get_success_response(PROFILE_UPDATE_SUCCESS_MSG)",
"def custom_profile_fields(self, custom_profile_fields):\n\n self._custom_profile_fields = custom_profile_fields",
"def _parse_profile_data (self, netflix_page_data):\n profiles = {};\n important_fields = [\n 'profileName',\n 'isActive',\n 'isAccountOwner',\n 'isKids'\n ]\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for profile_id in netflix_page_data.get('profiles'):\n if self._is_size_key(key=profile_id) == False and type(netflix_page_data['profiles'][profile_id]) == dict and netflix_page_data['profiles'][profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n if important_field == 'profileName':\n profile.update({important_field: HTMLParser.HTMLParser().unescape(netflix_page_data['profiles'][profile_id]['summary'][important_field]).encode('utf8')})\n else:\n profile.update({important_field: netflix_page_data['profiles'][profile_id]['summary'][important_field]})\n avatar_base = netflix_page_data['nf'].get(netflix_page_data['profiles'][profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar, 'isFirstUse': False})\n profiles.update({profile_id: profile})\n return profiles\n\n # values are stored in lists (returned from JS parser)\n # TODO: get rid of this christmas tree of doom\n for item in netflix_page_data:\n if 'hasViewedRatingWelcomeModal' in dict(item).keys():\n for profile_id in item:\n if self._is_size_key(key=profile_id) == False and type(item[profile_id]) == dict and item[profile_id].get('avatar', False) != False:\n profile = {'id': profile_id}\n for important_field in important_fields:\n profile.update({important_field: item[profile_id]['summary'][important_field]})\n avatar_base = item['nf'].get(item[profile_id]['summary']['avatarName'], False);\n avatar = 'https://secure.netflix.com/ffe/profiles/avatars_v2/320x320/PICON_029.png' if avatar_base == False else avatar_base['images']['byWidth']['320']['value']\n profile.update({'avatar': avatar})\n profiles.update({profile_id: profile})\n return profiles",
"def save_profile(self, data, suffix=''):\n # pylint: disable=unused-argument\n self.display_name = data.get('display_name', self.display_name)\n\n users_included_email = data.get('users_included_email', self.users_included_email)\n try:\n regexp_string = self.regexp_from_users_included_email(users_included_email)\n re.compile(regexp_string)\n except:\n raise JsonHandlerError(400, 'Users to exclude by email is causing an error, please edit.')\n self.users_included_email = users_included_email\n\n self.profile_display_job_title = data.get('profile_display_job_title', self.profile_display_job_title)\n self.profile_display_organisation = data.get('profile_display_organisation', self.profile_display_organisation)\n self.profile_display_work_country = data.get('profile_display_work_country', self.profile_display_work_country)\n self.profile_display_email_button = data.get('profile_display_email_button', self.profile_display_email_button)\n self.profile_display_bio = data.get('profile_display_bio', self.profile_display_bio)\n self.enable_cohorts = data.get('enable_cohorts', self.enable_cohorts)",
"def update_profile(orcid_id, data=None):\n \n u = db.session.query(User).filter_by(orcid_id=orcid_id).first()\n if u:\n u.updated = datetime.utcnow()\n if data:\n u.profile = data\n # save the user\n db.session.begin_nested()\n try:\n db.session.add(u)\n db.session.commit()\n except exc.IntegrityError as e:\n db.session.rollback()\n # per PEP-0249 a transaction is always in progress \n db.session.commit()",
"def create(self, validated_data):\r\n user_data = validated_data.pop('user')\r\n user = UserSerializer.create(UserSerializer(), validated_data = user_data)\r\n profile, created = Profile.objects.update_or_create(user = user,\r\n bio = validated_data.pop('bio'),\r\n location = validated_data.pop('location'),\r\n birth_date = validated_data.pop('birth_date'))\r\n return profile",
"def save(self, profile_callback=None):\n\n # First, save the parent form\n new_user = super(BodbRegistrationForm, self).save(profile_callback=profile_callback)\n\n # Update user with first, last names\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Update profile with affiliation\n profile = new_user.get_profile()\n profile.affiliation = self.cleaned_data['affiliation']\n profile.save()\n\n cache.set('%d.profile' % new_user.id, profile)\n\n return new_user",
"def user_profile_data(id):\n user = User.query.get(id)\n return user.to_dict_profile()",
"def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = UserSerializer.create(UserSerializer(), validated_data=user_data)\n profile, created = Profile.objects.update_or_create(\n user=user,\n avatar=validated_data.pop('avatar'),\n biography=validated_data.pop('biography'),\n link=validated_data.pop('link') \n )\n return profile",
"def _profile(user):\n profile = UserProfile()\n profile.user_id = user.id\n profile.save()",
"def create(self, validated_data):\n request = self.context.get('request')\n profile = Profile(**validated_data)\n profile.user = request.user\n profile.save()\n return profile",
"def map_profile_fields(data, fields):\n profile = {}\n for dst, src in fields.items():\n if callable(src):\n value = src(data)\n else:\n value = data.get(src)\n\n if value is not None and value != '':\n profile[dst] = value\n\n return profile",
"def __init__(self, data=None, **kw):\n def _get_class_by_id(profile_id):\n from solariat_bottle.db.user_profiles.social_profile import DELIMITER, TwitterProfile, FacebookProfile\n pos = unicode(profile_id).rfind(DELIMITER) + 1\n if pos == 0:\n return self.__class__\n platform = None\n try:\n index = int(profile_id[pos:])\n except ValueError:\n logger.info(u\"Could not obtain platform from profile id: {}\".format(profile_id))\n else:\n platform = PLATFORM_BY_INDEX.get(index)\n class_ = {\n TwitterProfile.platform: TwitterProfile,\n FacebookProfile.platform: FacebookProfile\n }.get(platform, self.__class__)\n return class_\n\n if data:\n profile_id = data.get('_id')\n else:\n profile_id = kw.get('id')\n if isinstance(profile_id, basestring):\n self.__class__ = _get_class_by_id(profile_id)\n super(UserProfile, self).__init__(data, **kw)",
"def set_user_config(self, data):\n config = self.read_config_obj(self.account_file)\n for key, value in data.items():\n config.set(self.user, str(key), value)\n\n self.write_config(self.account_file, config)",
"def set_user_info(self, usrs):\r\n logger.info('Starting set user profile info')\r\n user = choice(usrs)\r\n self.title = user['title']\r\n self.fname = user['fname']\r\n self.lname = user['lname']\r\n self.email = user['email']\r\n self.password = user['password']\r\n self.dob = user['dob']\r\n self.company = user['company']\r\n self.address = user['address']\r\n self.city = user['city']\r\n self.postalcode = user['postalcode']\r\n self.phone = user['phone']\r\n logger.info('Ending set user profile info')",
"def user_custom_data(self, user_custom_data):\n\n self._user_custom_data = user_custom_data",
"def _parse_user_data (self, netflix_page_data):\n user_data = {};\n important_fields = [\n 'authURL',\n 'gpsModel',\n 'guid'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n user_data.update({important_field: netflix_page_data.get(important_field, '')})\n return user_data\n\n # values are stored in lists (returned from JS parser)\n for item in netflix_page_data:\n if 'memberContext' in dict(item).keys():\n for important_field in important_fields:\n user_data.update({important_field: item['memberContext']['data']['userInfo'][important_field]})\n\n return user_data",
"def save_user_profile(instance, **_):\n instance.profile.save()",
"def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def build_profile(first, last, **user_info):\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def profile_data(self):\n return self._profile_data",
"def update_user_profile(user_info):\n user_id = user_info[\"USER_ID\"]\n user_collection.find_one_and_update(\n {\"_id\": user_id},\n {\n \"$set\": {\n \"username\": user_info[\"username\"],\n \"email\": user_info[\"email\"],\n \"avatar\": user_info[\"avatar\"],\n \"githubURL\": user_info[\"githubURL\"],\n \"linkedinURL\": user_info[\"linkedinURL\"],\n \"stackoverflowURL\": user_info[\"stackoverflowURL\"],\n \"skills\": user_info[\"skills\"],\n }\n },\n upsert=False,\n )",
"def build_profile(first, last, **user_info):\n\n user_info['first_name'] = first\n user_info['last_name'] = last\n return user_info",
"def user_profile(first, last, **add_info):\n profile = {}\n profile['firstname'] = first\n profile['lastname'] = last\n\n for key, value in add_info.items():\n profile[key] = value\n \n return profile",
"def user_profile():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user profile\")\n user_profile = get_user_profile(access_token)\n user_profile['access_token'] = access_token\n return json.dumps(user_profile)",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def profile(self) -> dict:\n endpoint = \"/api/users/profile/\"\n ret = self._request(endpoint=endpoint)\n return ret",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return profile",
"def user_data(self, access_token, *args, **kwargs):\n params = self.setting(\"PROFILE_EXTRA_PARAMS\", {})\n response = kwargs.get('response') or {}\n params[\"access_token\"] = access_token\n headers = {\n \"Authorization\": \"%s %s\" % (\n response.get(\"token_type\", \"Bearer\").capitalize(),\n access_token),\n \"Accept\": 'application/json',\n \"Content-type\": 'application/json;charset=utf-8'}\n return self.get_json(self.USER_DATA_URL,\n params=params, headers=headers)",
"def put(self, request):\n profile = Profile.get_by_id(request.user.id)\n if not profile:\n return HttpResponse(status=403)\n user = CustomUser.objects.get(id=request.user.id)\n update_data = json.loads(request.body.decode('utf-8'))\n user.update(first_name=update_data.get('first_name'),\n last_name=update_data.get('last_name'))\n profile.update(\n birthday=update_data.get('birthday'),\n gender=update_data.get('gender'),\n hobbies=update_data.get('hobbies'),\n facebook=update_data.get('facebook'))\n data = profile.to_dict()\n return JsonResponse(data, status=200)",
"def SetUserData(self, key, data):\n self._userdata[key] = data",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)",
"def build_profile(first, last, **user_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in user_info.items():\n profile[key] = value\n return(profile)",
"def update_profile():\n logger.debug(\"entering function update_profile\")\n response = update_user_profile(request.json)\n logger.debug(\"exiting function update_profile\")\n return jsonify(response)",
"def edit_profile():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify that user has logged in and the request is legit\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified != True: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # part2: check json\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_CREATE_USER_NO_JSON)\n if checked_json != True: return response\n # part3: verify json data\n try:\n user_email = login_session[\"login_user_email\"]\n except KeyError:\n # key error means we are offline til this far\n user_email = requested_json[\"email\"]\n # design decision: if there are invalid field names, only update the valid fields.\n # check updates keys and formats\n try:\n update_pairs = convert_to_underscore(requested_json[\"updates\"])\n \n if isinstance(update_pairs,dict) != True:\n response = generate_message(MESSAGE_UPDATE_PROFILE_NON_DICT,400)\n else:\n correct_format,valid_update_pairs, response = process_request_json(User,update_pairs)\n if correct_format == True: \n update_field(User, session, {\"email\": user_email},valid_update_pairs)\n response = generate_message(MESSAGE_UPDATE_PROFILE_SUCCESS,200)\n except KeyError:\n response = generate_message(MESSAGE_UPDATE_PROFILE_NO_ENTRY,400)\n return response",
"def update_profile_data(self, **kwargs):\n # TODO: double check that the following will actually check if the user is not logged in, unit test\n if not self.uprofile:\n return None\n desc = kwargs.get('description', self.uprofile.description)\n self.uprofile.description = desc\n self.uprofile.save()\n return self.uprofile",
"def users_profile_update(self):\n email_query = request.args.get('email')\n if not email_query:\n self.logger.debug((messages.MISSING_FIELDS_ERROR % \"email\"))\n return messages.ERROR_JSON % (messages.MISSING_FIELDS_ERROR % \"email\"), 400\n token = auth.current_user()[1]\n content = request.form\n password = content[\"password\"] if \"password\" in content else None\n fullname = content[\"fullname\"] if \"fullname\" in content else None\n phone_number = content[\"phone_number\"] if \"phone_number\" in content else None\n photo = Photo.from_bytes(request.files['photo'].stream) if 'photo' in request.files else None\n try:\n self.auth_server.profile_update(email=email_query, user_token=token,\n password=password, fullname=fullname,\n phone_number=phone_number, photo=photo)\n except UnauthorizedUserError:\n self.logger.debug(messages.USER_NOT_AUTHORIZED_ERROR)\n return messages.ERROR_JSON % messages.USER_NOT_AUTHORIZED_ERROR, 403\n except UnexistentUserError:\n self.logger.debug(messages.USER_NOT_FOUND_MESSAGE % email_query)\n return messages.ERROR_JSON % (messages.USER_NOT_FOUND_MESSAGE % email_query), 404\n return messages.SUCCESS_JSON, 200",
"def _doProfile(self, save_request=None):\n prof = self._getProfileFromUser()\n # If saveProfile(), process user-modifyable fields\n if save_request:\n for field in ('displayName', 'teeShirtSize'):\n if hasattr(save_request, field):\n val = getattr(save_request, field)\n if val:\n print(val)\n setattr(prof, field, str(val))\n prof.put()\n # Return ProfileForm\n return self._copyProfileToForm(prof)",
"def build_profile(first, last, **user_info):\r\n # Build a dict with the required keys.\r\n profile = {'first': first, 'last': last}\r\n # Add any other keys and values.\r\n for key, value in user_info.items():\r\n profile[key] = value\r\n return profile",
"async def create_profile_for_user(self, *, profile_create: ProfileCreate) -> ProfileInDB:\n created_profile = await self.db.fetch_one(query=CREATE_PROFILE_FOR_USER_QUERY, values=profile_create.dict())\n return ProfileInDB(**created_profile)",
"def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None",
"def replace_user_data(self, new_data):\n self._user_data = new_data",
"def get_object_data(self, **kwargs):\n user = self.request.user\n return UserProfile.objects.get(user=user)",
"def basic_profile_fields(self, basic_profile_fields):\n\n self._basic_profile_fields = basic_profile_fields",
"def save(self, *args, **kwargs):\n\n kwargs[\"commit\"] = False\n user = super(JOSProfileForm, self).save(*args, **kwargs)\n\n try:\n profile = get_profile_for_user(user)\n profile_form = self.get_profile_fields_form()\n profile_form(self.data, self.files, instance=profile).save()\n except ProfileNotConfigured:\n pass\n\n return user",
"def edit_user_profile(request):\n user = request.user\n user_profile = UserProfile.objects.filter(user=user)[0]\n if request.method == 'POST':\n form = MemberProfileForm(request.POST)\n additional_form = MemberAdditionalProfileForm(request.POST)\n if form.is_valid() and additional_form.is_valid():\n cd = form.cleaned_data\n user.first_name = cd['first_name']\n user.last_name = cd['last_name']\n user.email = cd['email']\n user.save()\n if 'picture' in request.FILES:\n file = request.FILES['picture']\n user_profile.picture.save(file.name, file, save=True)\n user_profile.gravatar = additional_form.cleaned_data['gravatar']\n user_profile.save()\n return HttpResponseRedirect('/')\n else:\n form = MemberProfileForm(instance=request.user)\n additional_form = MemberAdditionalProfileForm(instance=user_profile)\n return render_to_response('edit_profile.html', locals())",
"def build_profile(self, update, context, phone=None, raw_text=None):\n user = update.effective_user\n chat_id = update.effective_chat.id\n log.info(\"PROFILE from %s `%s`\", chat_id, raw_text)\n # import pdb; pdb.set_trace()\n\n # If necessary, create the part of the state that holds data about registration procedures\n if \"registrations\" not in context.bot_data:\n context.bot_data[\"registrations\"] = {}\n\n if chat_id not in context.bot_data[\"registrations\"]:\n # create a new user profile and add it to the bot's state, so we can populate it\n # as we ask the user to provide info about themselves; keep in mind that it is an ORDERED dict, we'll\n # rely on this later!\n profile = OrderedDict(\n {\n c.PROFILE_FIRST_NAME: user.first_name, # may be empty at first\n c.PROFILE_LAST_NAME: user.last_name, # may be empty at first\n c.PROFILE_AVAILABILITY: None,\n c.PROFILE_ACTIVITIES: [],\n c.PROFILE_PHONE: phone,\n c.PROFILE_EMAIL: None,\n }\n )\n\n if not phone.startswith(c.LOCAL_PREFIX):\n # If the Telegram phone number is not a local number (i.e. it was registered abroad), we're moving it\n # to a different attribute, and clearing the original one, such that later in this function we shall\n # ask for a local phone number\n log.debug(\"Phone number is foreign, will ask for a local one\")\n profile[c.PROFILE_PHONE_FOREIGN] = phone\n profile[c.PROFILE_PHONE] = None\n\n context.bot_data[\"registrations\"][chat_id] = profile\n else:\n profile = context.bot_data[\"registrations\"][chat_id]\n\n for key, value in profile.items():\n if not value:\n # a part of the profile is empty, maybe we should ask about it?\n if raw_text:\n # This seems to be yet another call of this function, so raw_text contains the answer to the\n # question asked earlier - let's populate it.\n # NOTE that we use an OrderedDict when building the profile, so we know for sure this answer\n # goes to that particular question (i.e. key in the dict)\n profile[key] = raw_text\n raw_text = None\n continue\n\n # if we got this far, we stumbled upon the next missing part of the profile\n context.user_data[\"state\"] = c.State.EXPECTING_PROFILE_DETAILS\n\n self.updater.bot.send_message(\n chat_id=chat_id,\n text=c.PROFILE_QUESTIONS[key],\n parse_mode=ParseMode.MARKDOWN_V2,\n )\n\n if key == c.PROFILE_ACTIVITIES:\n # this is a special case, because we'll send them an interactive keyboard with options to chose from\n self.confirm_activities(update, context)\n return\n\n return\n\n # if we got this far, it means the profile is complete, inform the user about it\n self.updater.bot.send_message(\n chat_id=chat_id, text=c.MSG_ONBOARD_NEXT_STEPS, parse_mode=ParseMode.MARKDOWN,\n )\n\n # and the backend, but first let's augment the profile with more data\n profile[c.PROFILE_CHAT_ID] = chat_id\n self.backend.register_pending_volunteer(profile)\n context.user_data[\"state\"] = c.State.AVAILABLE\n\n # remove if from the state, because we don't need it anymore\n del context.bot_data[\"registrations\"][chat_id]\n\n # Also get rid of this user's individual keyboard for assitance activities\n context.user_data.pop(\"assist_keyboard\", None)",
"def update(self, identity, data=None, record=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})",
"def user_update_profile():\n \n if 'userid' and 'email' not in request.forms:\n return {'status':'Failure','message':'User Id is missing,please try with correct data.'}\n \n data = user_obj.user_update_profile(request.forms)\n return data",
"def profile(**kwargs):\n defaults = {'name': 'Test K. User', 'bio': 'Some bio.',\n 'website': 'http://support.mozilla.com',\n 'timezone': None, 'country': 'US', 'city': 'Mountain View',\n 'locale': 'en-US'}\n if 'user' not in kwargs:\n u = user(save=True)\n defaults['user'] = u\n defaults.update(kwargs)\n\n p = Profile(**defaults)\n p.save()\n return p",
"def add_user_to_posted_data(data=None, **kwargs):\n\n if not is_anonymous():\n data['user_id'] = current_user.id\n\n #TODO Improve method of applying user_id to sub models\n # perhaps using get_related_model? looping through entities of array?\n if 'records' in data:\n for record in data['records']:\n record['user_id'] = current_user.id\n\n if 'images' in data:\n for image in data['images']:\n image['user_id'] = current_user.id",
"def set(self, **kwargs: Any) -> None: # nosec\n attributes = {}\n user_id: int = int(kwargs[\"user_id\"])\n user = self.first(id_int=user_id)\n\n for k, v in kwargs.items():\n if k in user.__attr_searchable__:\n attributes[k] = v\n\n if kwargs.get(\"email\", None):\n user.email = kwargs[\"email\"]\n elif kwargs.get(\"role\", None):\n user.role = kwargs[\"role\"]\n elif kwargs.get(\"name\", None):\n user.name = kwargs[\"name\"]\n elif kwargs.get(\"budget\", None):\n user.budget = kwargs[\"budget\"]\n elif kwargs.get(\"website\", None):\n user.website = kwargs[\"website\"]\n elif kwargs.get(\"institution\", None):\n user.institution = kwargs[\"institution\"]\n else:\n raise Exception\n\n attributes[\"__blob__\"] = _serialize(user, to_bytes=True)\n\n self.update_one(query={\"id_int\": user_id}, values=attributes)",
"def my_profile(first_name,last_name,**data): \r\n print('your provided data is as follow:')\r\n profile = {}\r\n profile['first_name'] = first_name\r\n profile['last_name'] = last_name\r\n for key, value in data.items():\r\n profile[key] = value\r\n print(profile)",
"def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()",
"def save_user_profile(sender, instance, **kwargs):\n instance.profile.save()",
"def get_context_data(self, **kwargs):\r\n context = super().get_context_data(**kwargs)\r\n context['user'] = self.request.user\r\n context['profile'] = self.request.user.profile\r\n return context",
"def getProfileInfo(self):\n doc = minidom.parse(urllib.urlopen(serverString + \"/rest/user/\" + self.name))\n for element in doc.getElementsByTagName(\"user\")[0].childNodes:\n if element.nodeType != minidom.Node.ELEMENT_NODE:\n continue\n elif element.tagName == \"status\" and int(element.firstChild.data) != 1:\n raise ServerError(element.firstChild.data)\n elif element.tagName == \"input\":\n self.name = element.firstChild.data\n elif element.tagName == \"id\":\n self.id = element.firstChild.data\n elif element.tagName == \"image\":\n self.image = element.firstChild.data\n elif element.tagName == \"tagline\":\n if element.firstChild == None:\n self.tagline = None\n else:\n self.tagline = element.firstChild.data\n elif element.tagName == \"creation\":\n self.created = datetime.datetime.strptime(element.firstChild.data[:element.firstChild.data.rfind(\".\")]+\".GMT\", \"%Y-%m-%d %H:%M:%S.%Z\")",
"def populate_og_data(self, user):\n title = user.username if not user.profile.name else user.profile.name\n\n image_field = None\n # For image use reel thumbnail\n if user.profile.reel_thumbnail_16_9:\n image_field = user.profile.reel_thumbnail_16_9\n # Otherwise use the avatar\n elif user.profile.avatar:\n image_field = user.profile.avatar\n # Otherwise use the thumbnails of the latest posts\n elif user.post_set.filter(status='published'):\n latest_post = user.post_set.filter(status='published').last()\n if latest_post.thumbnail:\n image_field = latest_post.thumbnail\n\n image_alt = f\"{title} on anima.to\"\n\n return OgData(\n title=title,\n description=user.profile.bio,\n image_field=image_field,\n image_alt=image_alt,\n )",
"def syslogserverprofiles(self, syslogserverprofile_id, data, tenant_id=None, api_version=\"v2.0\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/syslogserverprofiles/{}\".format(api_version,\n tenant_id,\n syslogserverprofile_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def create(self, request, *args, **kwargs):\n response = super().create(request, *args, **kwargs)\n profile = response.data\n user_name = profile.get(\"username\")\n cache.set(f\"{USER_PROFILE_PREFIX}{user_name}\", profile)\n return response",
"def build_profile(first,last,**userInfo):\r\n #empty dictionary to hold the user's profile.\r\n profile={}\r\n profile['firstName']=first\r\n profile['lastName']=last\r\n\r\n \"\"\"loop though the additional key-value pairs in the dictionary userInfo and add each pair to the profile dictionary.\"\"\"\r\n for key, value in userInfo.items():\r\n profile[key]=value\r\n return profile",
"def _set_user_field(self, user_id: int, field: str, value: str, data):\n if field not in Users.USER_ATTR_WHITELIST:\n raise ForbiddenArgument(\"invalid field\")\n\n # Do an assortment of checks\n if field == \"username\" and self.rc.hexists(\"user:by_username\", value):\n raise UsernameAlreadyExists(\"username taken\")\n if field == \"email\" and self.rc.hexists(\"user:by_email\", value):\n raise EmailAlreadyRegistered(\"email already registered\")\n if field == \"password\":\n # Hash password\n value = self._hash_password(value)\n if field == \"reg_on\":\n raise ForbiddenArgument(\"can't update reg_on via _set_user_field\")\n\n response = decode(self.rd.hset(f\"user:{user_id}\", field, value))\n\n # Update cache if needed\n if field == \"username\":\n prev = data[\"username\"]\n self.cache.cache_user_field_update(user_id, FieldUpdateType.USERNAME_UPDATE, prev, value)\n elif field == \"email\":\n prev = data[\"email\"]\n self.cache.cache_user_field_update(user_id, FieldUpdateType.EMAIL_UPDATE, prev, value)\n\n return response",
"def init_data_for_user_infos(db_data):\n user_infos = db_data.get('user_info')\n if user_infos is not None:\n rows = user_infos.get('data')\n for row in rows:\n user_info = UserInfo(\n name=row[0], first_name=row[1], last_name=row[2],\n position=row[3], company=row[4], nationality=row[5],\n tobe_contacted=row[6], skills_have=row[7],\n skills_learned=row[8]\n )\n db_add_and_commit(db, user_info)",
"def parse_user_fields(json_data):\n # Populate the fields\n user_info = {}\n for db_field, json_field in Users.UserJSON.fields.items():\n try:\n user_info[db_field] = get_json_field(json_data, json_field)\n if db_field == 'user_address_street':\n user_info[db_field] = user_info.get(db_field).replace('\\n', '')\n elif (db_field == 'user_first_login') or (db_field == 'user_last_login'):\n raw_timestamp = user_info.get(db_field)[:19]\n user_info[db_field] = core_utils.validate_timestamp(raw_timestamp)\n elif db_field == 'user_tags':\n user_info[db_field] = ', '.join(user_info.get(db_field))\n elif db_field == 'user_profile':\n profile = user_info[db_field]\n for idx in range(len(profile)):\n if profile[idx]['jive_label'] in Users.UserJSON.profile_fields:\n profile_field_name = Users.UserJSON.profile_fields.get(profile[idx]['jive_label'])\n user_info[profile_field_name] = profile[idx]['value']\n del user_info['user_profile']\n except (KeyError, IndexError, AttributeError):\n # Continue on to the next field\n continue\n # Return the user information\n return user_info",
"def _add_user(data: dict) -> dict:\n user = create_user()\n name = []\n if 'first_name' in data:\n name.append(data['first_name'])\n if 'middle_name' in data:\n name.append(data['middle_name'])\n if 'last_name' in data:\n name.append(data['last_name'])\n user['name'] = ' '.join(name)\n if 'role' in data:\n user['exp']['exp']['title'] = data['role']\n if 'affiliation' in data:\n user['abs'] = data['affiliation']\n user['exp']['exp']['company'] = data['affiliation']\n elif 'organization' in data:\n user['abs'] = data['organization']\n user['exp']['exp']['company'] = data['organization']\n phone = []\n if 'phone' in data:\n phone.append(data['phone'])\n if 'phone_ext' in data:\n phone.append(data['phone_ext'])\n user['contact']['phone'] = '-'.join(phone)\n user['contact']['email'] = data['email'] if 'email' in data else ''\n if 'degrees' in data:\n if not user.title:\n user['edu']['degree'] = data['degrees']\n if len(user['name']) < 0:\n user['name'] = user['contact']['email'] if len(user['contact']['email']) > 0 else 'Anonymous'\n return user",
"def update_user_profile(id):\n token = request.json['token']\n u = user.User.query.filter(user.User.token == token).first()\n if u is None:\n abort(404)\n if u.id != id:\n print \"user id is wrong.\" #TODO: Support log system\n abort(500)\n u.name = request.json['name']\n u.nickname = request.json['nickname']\n u.company = request.json['nickname']\n with store_context(fs_store):\n with open(files.path(request.json['header'])) as f:\n u.header_icon.from_file(f)\n db.session.merge(u)\n db.session.commit()\n db.session.merge(u)\n db.session.commit()\n return jsonify(u.to_dict())",
"def profile(user, **kwargs):\n defaults = {'user': user, 'name': 'Test K. User', 'bio': 'Some bio.',\n 'website': 'http://support.mozilla.com',\n 'timezone': None, 'country': 'US', 'city': 'Mountain View'}\n defaults.update(kwargs)\n\n p = Profile(**defaults)\n p.save()\n return p",
"def update_user_profile_info(user_id, user_fname, user_lname, email):\n \n user=User.query.filter(User.user_id == user_id).first()\n\n if email != None:\n user.update_email(email)\n if user_fname != None:\n user.update_first_name(user_fname)\n if user_lname != None:\n user.update_last_name\n \n db.session.commit()",
"def parse(self):\n details = self.details()\n\n return Profile(\n book_id=self.book_id,\n title=self.title(),\n user_id=self.user_id(),\n username=self.username(),\n summary=self.summary(),\n published=self.published(),\n updated=self.updated(),\n **details\n )",
"def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)",
"def create_user_profile_callback(sender, instance, created, **kwargs):\n try:\n instance.get_profile()\n except UserProfile.DoesNotExist:\n UserProfile.objects.create(user=instance)",
"def get_profile_data(auth, db):\n\n id_team, user, team, money, color_prim, color_sec = analyze_init(auth, db)\n id_user, seats, fans, ranking, streak = analyze_team_page(auth, db, id_team)\n\n \n v_profile = profile.Profile(\n id_user, user, id_team, team, money, color_prim, \n color_sec, seats, fans, ranking, streak\n )\n\n if (db.profile.find_one({\"id\": int(id_team)}) is not None):\n db.profile.replace_one(\n {\"id\": int(id_team)}, v_profile.to_db_collection())\n else:\n db.profile.insert_one(v_profile.to_db_collection())\n\n print(show(\"profile\") + \" > Perfil actualizado\")\n\n return id_team",
"def create_or_update_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.get_or_create(user=instance)\n instance.profile.save()",
"def profile(self, data):\n\n # need to store the output of each morphism in the analysis, which forms the training data later on\n self.profile = {}\n cur_output = data\n for morph in self.analysis.morphisms:\n cur_name = morph.name\n cur_output = morph.apply(cur_output)\n self.profile[cur_name] = cur_output",
"def put_userdata(self, nick, datakey, data):\n nickkey = irc.strings.lower(nick)\n if nickkey in self.users:\n alldata = self.users[nickkey]\n alldata[datakey] = data",
"def update_profile(self, data, sample_size=None, min_true_samples=None):\n encoding = None\n file_type = None\n\n if min_true_samples is not None \\\n and not isinstance(min_true_samples, int):\n raise ValueError('`min_true_samples` must be an integer or `None`.')\n\n if isinstance(data, data_readers.base_data.BaseData):\n encoding = data.file_encoding\n file_type = data.data_type\n data = data.data\n elif isinstance(data, self._allowed_external_data_types):\n file_type = str(data.__class__)\n else:\n raise TypeError(\n f\"Data must either be imported using the data_readers or using \"\n f\"one of the following: {self._allowed_external_data_types}\"\n )\n\n if not len(data):\n warnings.warn(\"The passed dataset was empty, hence no data was \"\n \"profiled.\")\n return\n\n # set sampling properties\n if not min_true_samples:\n min_true_samples = self._min_true_samples\n if not sample_size:\n sample_size = self._get_sample_size(data)\n\n self._update_profile_from_chunk(data, sample_size, min_true_samples)\n\n # set file properties since data will be processed\n if encoding is not None:\n self.encoding = encoding\n if file_type is not None:\n self.file_type = file_type",
"def set_user_profile_picture(user_id, file_name):\n\n user = User.query.get(user_id)\n \n user.profile_picture = file_name\n db.session.commit()",
"def create_or_update_user_profile(sender, instance, created, **kwargs):\n _, created = UserProfile.objects.get_or_create(user=instance)\n if created and instance.email != \"\":\n instance.profile.email = instance.email\n instance.profile.save()",
"def SetUserData(self, l):\r\n\r\n self.user_data = l",
"def prepare_user_data(self, user_risk_profile_qs):\n\n for user_risk_profile in user_risk_profile_qs:\n user = user_risk_profile.user\n portfolio_value = 0\n container = user.get_container_investments()\n\n if container:\n portfolio_value = container.get_value()\n\n suggested_score = self.__get_suggested_risk_score(user_risk_profile)\n\n self.users_risk_score_list.append(dict(\n user_id=user.app_uid,\n selected_user_risk_score=user_risk_profile.risk_profile.value,\n suggested_user_risk_score=suggested_score,\n risk_score_date_save=format_date_long(\n user_risk_profile.last_modified),\n investments_portfolio_value=portfolio_value\n ))",
"def user_profileImg(id):\n data = request.get_json(force=True)\n\n user = User.query.get(id)\n user.profileImg = data['profileImg']\n db.session.commit()\n return {'user': user.to_dict()}",
"def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile",
"def update(self, instance, validate_data):\n instance.first_name = validate_data.get('first_name', instance.first_name)\n instance.middle_name = validate_data.get('middle_name', instance.middle_name)\n instance.last_name = validate_data.get('last_name', instance.last_name)\n instance.email = validate_data.get('email', instance.email)\n instance.username = validate_data.get('username', instance.username)\n instance.mobile_number = validate_data.get('mobile_number', instance.mobile_number)\n instance.gender = validate_data.get('gender', instance.gender)\n instance.is_active = validate_data.get('is_active', instance.is_active)\n instance.country = validate_data.get('country', instance.country)\n instance.address = validate_data.get('address', instance.address)\n instance.role = validate_data.get('role', instance.role)\n\n if 'password' in validate_data:\n instance.set_password(validate_data.get('password'))\n if self.context['request'].data.get('file_profile_picture') is not None:\n if self.context['request'].data.get('file_profile_picture') == 'null':\n instance.profile_picture = None\n else:\n instance.profile_picture = self.context['request'].data['file_profile_picture']\n if self.context['request'].data.get('file_signature') is not None:\n if self.context['request'].data.get('file_signature') == 'null':\n instance.signature = None\n else:\n instance.signature = self.context['request'].data['file_signature']\n instance.save()\n return instance",
"def update_user_personal_data():\n if not 'user' in session:\n raise InvalidUsage(\"Access denied\", 401)\n\n data = request.json\n if 'name' not in data or not data['name']:\n raise InvalidUsage(\"Name must not be empty\", 422)\n if 'surname' not in data or not data['surname']:\n raise InvalidUsage(\"Surname must not be empty\", 422)\n if 'currentPassword' not in data or len(data['currentPassword']) < 6:\n raise InvalidUsage(\"Current password must have more then 5 characters\", 422)\n\n database = mysql.get_db()\n cursor = database.cursor()\n activeUser = session.get('user')\n\n query = '''SELECT password\n FROM users\n WHERE users.id = %s'''\n\n cursor.execute(query, (activeUser['id']))\n user = cursor.fetchone()\n\n if not bcrypt.check_password_hash(user['password'], data['currentPassword']):\n raise InvalidUsage(\"Wrong current password\", 401)\n\n query = '''UPDATE users\n SET name = %s, surname = %s\n WHERE id = %s'''\n\n cursor.execute(query, (data['name'], data['surname'], session.get('user')['id']))\n database.commit()\n\n activeUser['name'] = data['name']\n activeUser['surname'] = data['surname']\n session['user'] = activeUser\n\n return jsonify({'message': 'Successfully updated'}), 200",
"def _username_to_profile(self, username: str) -> Dict[str, Any]:\n\n base_url = self.keys.pre_profile + username + self.keys.rank_token + self.keys.post_profile\n\n # Build the page source url for the given user's account\n con = urllib.request.urlopen(base_url)\n user_profile = con.read().decode('utf-8')\n\n # Convert the webpage to a profile JSON\n profile: dict = json.loads(str(user_profile))\n return profile",
"def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile",
"def ipsecprofiles(self, ipsecprofile_id, data, tenant_id=None, api_version=\"v2.1\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/ipsecprofiles/{}\".format(api_version,\n tenant_id,\n ipsecprofile_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def update(self, instance, data):\n # extract password out of provided data\n # default value required for pop(), use None here\n passwd = data.pop('password', None)\n # simply update other fields\n user = super().update(instance, data)\n if passwd:\n user.set_password(passwd)\n user.save()\n return user",
"def get_meta(self) -> Meta:\n return Meta(\n object_type=\"profile\",\n extra_custom_props=[\n (\"property\", \"profile.username\", self.user.username),\n (\"property\", \"profile.first_name\", self.user.first_name),\n (\"property\", \"profile.last_name\", self.user.last_name),\n ]\n if self.user\n else [],\n title=self.display_name or self.name,\n image=self.image.large,\n )",
"def user_profile():\n user = current_user\n user_is_valid = True\n if not user.active:\n flash('This user account is under review. Please update your profile '\n + ' and contact the organizing team to access all functions of '\n + 'this platform.', 'warning')\n\n form = UserForm(obj=user, next=request.args.get('next'))\n form.roles.choices = [(r.id, r.name) for r in Role.query.order_by('name')]\n\n # Check conflicting PKs\n if form.email.data != user.email:\n if User.query.filter_by(email=form.email.data).first() is not None:\n flash('This e-mail address is already registered.', 'error')\n user_is_valid = False\n\n if user.sso_id:\n # Do not allow changing password on SSO\n del form.password\n\n # Validation has passed\n if form.is_submitted() and form.validate() and user_is_valid:\n # Assign roles\n user.roles = [Role.query.filter_by(\n id=r).first() for r in form.roles.data]\n del form.roles\n\n # Sanitize username\n user.username = sanitize_input(form.username.data)\n del form.username\n\n # Assign password if changed\n originalhash = user.password\n form.populate_obj(user)\n # Do not allow changing password on SSO\n if not user.sso_id:\n if form.password.data:\n user.set_password(form.password.data)\n else:\n user.password = originalhash\n\n user.updated_at = datetime.utcnow()\n db.session.add(user)\n db.session.commit()\n user.socialize()\n flash('Profile updated.', 'success')\n return redirect(url_for('public.user', username=user.username))\n\n if not form.roles.choices:\n del form.roles\n else:\n form.roles.data = [(r.id) for r in user.roles]\n return render_template('public/useredit.html',\n oauth_type=oauth_type(),\n user=user, form=form,\n active='profile')",
"def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n # Get the profile, the `save` method above creates a profile for each\n # user because it calls the manager method `create_user`.\n # See: https://github.com/django-userena-ce/django-userena-ce/blob/master/userena/managers.py#L65\n profile = new_user.my_profile\n profile.gender = self.cleaned_data['gender']\n profile.education = self.cleaned_data['education']\n profile.birthday = self.cleaned_data['birthday']\n profile.annual_income = self.cleaned_data['annual_income']\n profile.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user",
"def _process_plugin_data(self, fields, fetch_related_data=False):\n for field, default_value in fields:\n try:\n setattr(\n self.data,\n field,\n self.plugin_data.get(field, default_value)\n )\n except Exception:\n setattr(self.data, field, default_value)",
"def dnsserviceprofiles(self, dnsserviceprofile_id, data, tenant_id=None, api_version=\"v2.1\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/dnsserviceprofiles/{}\".format(api_version,\n tenant_id,\n dnsserviceprofile_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def putProfile(profileType,value):\n # PUT /profile/$profileType\n pass",
"def _profile_to_InstagramUser(profile: Dict[str, Any]) -> _InstagramUser:\n # Navigate to the user JSON that is coincidentally used by the provided API methods\n user = profile['users'][0]['user']\n\n # Simply build our InstagramUser, as the user JSON is the same\n return _InstagramUser(user)",
"def profile():\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n form = UserEditForm(obj=g.user)\n\n if form.validate_on_submit():\n if not User.authenticate(g.user.username, form.data[\"password\"]):\n flash(\"Invalid password.\", \"danger\")\n return render_template('/users/edit.html', form=form) \n # data = {k:v for k,v in form.data.items() if k != \"csrf_token\"}\n # data[\"image_url\"] = data[\"image_url\"] or None\n # data[\"header_image_url\"] = data[\"header_image_url\"] or None\n\n g.user.username = form.data[\"username\"]\n g.user.email = form.data[\"email\"]\n g.user.image_url = form.data[\"image_url\"] or None\n g.user.header_image_url = form.data[\"header_image_url\"] or None\n g.user.bio = form.data[\"bio\"]\n\n db.session.commit()\n\n flash(\"Profile edited!\", \"success\")\n return redirect(f'/users/{g.user.id}')\n\n return render_template('/users/edit.html', form=form)"
] |
[
"0.6163082",
"0.6090175",
"0.6075812",
"0.6008002",
"0.59890383",
"0.5896696",
"0.5854722",
"0.58466715",
"0.58388495",
"0.58365136",
"0.58214825",
"0.58029795",
"0.58013165",
"0.57626474",
"0.5759382",
"0.5732638",
"0.56859213",
"0.56347704",
"0.5612301",
"0.5607427",
"0.5607427",
"0.55879056",
"0.5559909",
"0.55548525",
"0.5544581",
"0.55234855",
"0.5515105",
"0.5515105",
"0.5515105",
"0.55126953",
"0.55003816",
"0.55003816",
"0.54652727",
"0.5460412",
"0.5458159",
"0.54415977",
"0.54415977",
"0.5437911",
"0.5436452",
"0.5430342",
"0.54245573",
"0.54178524",
"0.54116815",
"0.5405034",
"0.5398997",
"0.5398224",
"0.53570634",
"0.53237224",
"0.53228617",
"0.53131515",
"0.5311676",
"0.5275434",
"0.52709013",
"0.5224714",
"0.5219474",
"0.521404",
"0.5213915",
"0.52133363",
"0.52133363",
"0.5208343",
"0.52066296",
"0.5203915",
"0.518923",
"0.5185446",
"0.5175689",
"0.5168419",
"0.5165907",
"0.51647353",
"0.5161851",
"0.51608497",
"0.51597375",
"0.5157757",
"0.51444584",
"0.5140205",
"0.51387155",
"0.51321363",
"0.51315916",
"0.5131263",
"0.5129589",
"0.5127119",
"0.5125272",
"0.5107962",
"0.50938755",
"0.5088152",
"0.5085996",
"0.5072546",
"0.50702316",
"0.50672287",
"0.50500685",
"0.50391954",
"0.5037255",
"0.5035893",
"0.5031316",
"0.50223404",
"0.50215495",
"0.5021234",
"0.502043",
"0.50159824",
"0.50125337",
"0.5009642"
] |
0.7260224
|
0
|
Problem 1 Version 3: Use a formula to determine the additional sum contributed by each block of 15 integers, then use the iterative approach for any remaining integers in the range.
|
def solution(resources, args):
    retval = 0
    # Offsets of the multiples of 3 or 5 within each block of 15 consecutive integers.
    repeats = [3, 5, 6, 9, 10, 12, 15]
    i = 0
    n = args.number - 1
    # Consume whole blocks of 15 with the closed-form contribution:
    # block i adds sum(repeats) + 15 * len(repeats) * i.
    while n > 15:
        retval += sum(repeats)
        retval += 15*len(repeats)*i
        n -= 15
        i += 1
    # Fall back to iteration for the at-most-15 integers left over.
    while n >= 3:
        if n % 3 == 0 or n % 5 == 0:
            retval += 15*i + n
        n -= 1
    return retval
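
The closed-form step works because the multiples of 3 or 5 repeat with period 15, at offsets 3, 5, 6, 9, 10, 12 and 15 within each block. Below is a minimal sketch (not part of the dataset row) that cross-checks the solution against a brute-force sum; it assumes args.number is the exclusive upper bound of the range, that the unused resources argument may be None, and introduces brute_force purely as an illustrative reference helper.

from types import SimpleNamespace

def brute_force(upper):
    # Straightforward O(n) reference: sum every multiple of 3 or 5 below upper.
    return sum(k for k in range(upper) if k % 3 == 0 or k % 5 == 0)

# Classic Project Euler Problem 1 instance: multiples of 3 or 5 below 1000.
args = SimpleNamespace(number=1000)
assert solution(None, args) == brute_force(1000) == 233168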
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def triangular_number_solution():\n return 5 * partial_sum(199) + 3 * partial_sum(333) - 15 * partial_sum(66)",
"def solveProblem021():\n total = 0\n for i in range(2, 10000):\n divs = getProperDivisors(i)\n s = sum(divs)\n # Skip stuff greater than, we'll get to it later if it's less than max.\n if s > i:\n continue\n if s == i:\n continue\n t = sum(getProperDivisors(s))\n if t == i:\n total = total + i + s\n print(\"The Sum is: %d\" % (total,))",
"def problem001():\n\n sum = 0\n for i in range(199):\n i += 1\n if i % 3 == 0:\n continue\n sum += i*5;\n for i in range(333):\n i += 1\n sum += i*3\n print sum\n\n sum = 0\n for i in range(999):\n i += 1\n if (i % 3 == 0 or i % 5 == 0):\n sum += i\n print sum\n\n print reduce(lambda x,y: x+y, filter(lambda n: n%3==0 or n%5==0, range(1000)))\n #arithmetic progressions\n #http://en.wikipedia.org/wiki/Arithmetic_progression\n x = 1000;\n print 1.5*(int)((x-1)/3)*(int)((x+2)/3) + 2.5*(int)((x-1)/5)*(int)((x+4)/5) - 7.5*(int)((x-1)/15)*(int)((x+14)/15);",
"def solution(number):\n\n # a is a list with all the numbers below input that are multiples of 3 or 5\n a = [x for x in range(1,number) if x % 3 == 0 or x % 5 == 0]\n return sum(a)",
"def sumTo(n):\n\n sum_all = (n * (n+1))/2\n\n return sum_all",
"def problem1():\n return sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)",
"def exercise2():\n # You _DO_NOT_ need to modify this code for Lab 10.\n n = easygui.integerbox( \"Enter n:\", \"Input\", lowerbound=0, upperbound=2 ** 31 )\n\n s = summation( n, 1 )\n f = n * ( n + 1 ) // 2\n easygui.msgbox( \"n = {}, summation( n, 1 ) = {}, formula result = {}\".format( n, s, f ) )\n\n s = summation( n, 2 )\n f = n * ( n + 1 ) * ( 2 * n + 1 ) // 6\n easygui.msgbox( \"n = {}, summation( n, 2 ) = {}, formula result = {}\".format( n, s, f ) )\n\n s = summation( n, 3 )\n f = ( n * ( n + 1 ) // 2 ) ** 2\n easygui.msgbox( \"n = {}, summation( n, 3 ) = {}, formula result = {}\".format( n, s, f ) )",
"def sumTo(n):\n \n the_sum = 0 #current sum\n a_number = 1 #where we are\n while a_number <= n:\n the_sum += a_number\n a_number += 1\n return the_sum",
"def solution(n: int = 28123) -> int:\n\n nums = range(1, n+1)\n abundant = list(filter(is_abundant, nums))\n abundant_sums = set(all_sums(abundant, n))\n fit = set(nums) - abundant_sums\n return fit",
"def sum_numbers_one_to_ten():\n sum=0\n for num in range(1,11):\n sum=sum+num\n return sum\n pass",
"def get_partial_sum(n: int, j: int) -> float:\n first_term = sum(\n modulo_exp(16, n - k, 8 * k + j) / (8 * k + j) for k in range(0, n + 1)\n )\n first_term = first_term - math.floor(first_term)\n\n k = n + 1\n second_term = 0\n while True:\n max_rest_term = 16 ** (n - k) / max(8 * k, 1) / 15\n if math.floor(second_term * 16) == math.floor(\n (second_term + max_rest_term) * 16\n ):\n break\n\n second_term += 16 ** (n - k) / (8 * k + j)\n k += 1\n\n res = first_term + second_term\n res = res - math.floor(res)\n\n return res",
"def get_3_5_sum(num):\n nums = range(num)\n mults = [num for num in nums if (num % 3 == 0 or num % 5 == 0)]\n return sum(mults)",
"def task16_sum_up_until():\n num = int(input('Enter how many numbers to sum up\\n'))\n result = 0\n for i in range(num + 1):\n result += i\n return result",
"def sum_to(n):\n the_sum = 0\n for counter in range(n+1):\n the_sum = the_sum + counter\n return the_sum",
"def integer_sum(n):\n\n sum = 0\n k = 0\n\n # INVARIANT\n # The sum of far is equal to the sum of the first k integer numbers\n # VARIANT: n-k\n #\n while (k!=n):\n k += 1\n sum += k\n\n return sum",
"def sum_range_multiples_3_5(min, max):\n\ttotal = 0\n\tfor i in range(min,max):\n\t\tif (i % 3 == 0) or (i % 5 == 0):\n\t\t\ttotal += i\n\treturn total",
"def method2():\n n = 1000\n s = 0\n multiples = [3,5]\n total = []\n\n for m in multiples:\n total.append(0)\n\n minValue = 0\n while(minValue < 1000):\n minValue = 1000\n minPosition = 0\n for i, v in enumerate(total):\n if v < minValue:\n minValue = v\n minPosition = i\n\n temp = total[minPosition] + multiples[minPosition]\n\n if(temp < 1000) and (temp not in total):\n s += temp\n\n total[minPosition] = temp\n\n return s",
"def solve(n=4 * 10**6):\r\n target_sum = 0\r\n fib_sequence = fibonacci_generator()\r\n for num in fib_sequence:\r\n if num >= n:\r\n break\r\n\r\n if num % 2 == 0:\r\n target_sum += num\r\n\r\n return target_sum",
"def multiples_sum_3_5(n):\n\tsum = 0\n\tfor x in range(1,n):\n\t\tif (x%3==0) or (x%5==0):\n\t\t\tsum += x\n\treturn sum",
"def sumTotal(n):\n\n sum_total = 0\n\n for i in range(1, n+1):\n sum_total = sum_total + i\n\n return sum_total",
"def find_sum():\n term_1 = 1\n term_2 = 2\n total = 2\n while True:\n new_term = term_1 + term_2\n\n # Break if passing upper bound\n if new_term > UPPER_BOUND:\n break\n\n if new_term % 2:\n total += new_term\n\n term_1 = term_2\n term_2 = new_term\n\n print \"Sum: {0}\".format(total)",
"def sum_to_n(n):\n total = 0\n for i in range(1,n+1):\n total += i\n return total",
"def totalSolutions(n:int):\n\n memo = [0]*6\n memo[5] = 1\n if n == 1:\n return 1\n\n for i in range(n):\n solutions = sum(memo)\n memo.pop(0)\n memo.append(solutions)\n\n return memo.pop()",
"def add_up(num):\n aList = list(range(1, num + 1))\n sum = 0\n\n for item in aList:\n sum = add_together(sum, item)\n# print(\"NOW SUM IS: \" + str(sum))\n\n return sum",
"def fn(i, k):\n if i == len(nums): return 0\n if k < 0: return inf \n ans = inf\n rmx = -inf # range max \n rsm = 0 # range sum \n for j in range(i, len(nums)): \n rmx = max(rmx, nums[j])\n rsm += nums[j]\n ans = min(ans, rmx*(j-i+1) - rsm + fn(j+1, k-1))\n return ans",
"def sum_amicable(limit):\n\n def find_amicable_pair(n):\n check_n= 0\n potential_half = 0\n for i in range(1,n):\n if n % i == 0:\n potential_half += i\n for i in range(1, potential_half):\n if potential_half % i == 0:\n check_n += i\n if check_n == n and n != potential_half: # exclude self amicable\n result.append(n)\n result.append(potential_half)\n\n result = []\n for num in range(1, limit):\n if num not in result:\n find_amicable_pair(num)\n return sum(result)",
"def problem():\n size = 1001\n return sum(n**2 * 4 - 6 * n + 6 for n in range(3, size+1, 2)) + 1",
"def test_large_sum(self):\n for n in [10, 20, 30, 40, 50]:\n A = np.arange(n*n)\n A = np.reshape(A, (n, n))\n x = Variable(n, n)\n p = Problem(Minimize(at.sum_entries(x)), [x >= A])\n result = p.solve()\n answer = n*n*(n*n+1)/2 - n*n\n print(result - answer)\n self.assertAlmostEqual(result, answer)",
"def summationLoop(lower, upper):\r\n sum = 0\r\n for i in range(lower, upper + 1):\r\n sum += i\r\n return sum",
"def solution(n):\n i = 1\n j = 2\n sum = 0\n while j <= n:\n if j % 2 == 0:\n sum += j\n i, j = j, i + j\n\n return sum",
"def sum(n):\n if n == 0:\n return 0\n return sum(n - 1) + n",
"def sum_of_nth( n ):\n if n > 0:\n return sum( range(n + 1) )\n else:\n return 0",
"def solve():\n sum_of_squares = 0\n for i in range(1, 101):\n sum_of_squares += i * i\n square_of_sum = sum(range(1, 101)) ** 2\n return square_of_sum - sum_of_squares",
"def minSums(numbers, num_sum):\n\n def generate_permutations(numbers):\n \"\"\" [string,] Given a string of numbers, generate all possible permutations\n of the numbers with plusses in between. \"1\" returns \"1\".\n \"11\" returns [\"11\", \"1+1\"], etc \"\"\"\n\n permutations = list()\n temp = list()\n # Generate all possible permutations of numbers and plusses, record\n # the number of plus signs as cost.\n for i, num in enumerate(numbers):\n # Base case, append the number and cost of 0\n if i == 0:\n permutations.append((num, 0))\n else:\n # Iterate through permutations, appending new items to temp.\n # Strings can be permutated two ways: string + char,\n # string + '+' + char\n for item in permutations:\n temp.append((item[0] + num, item[1]))\n temp.append((item[0] + '+' + num, item[1] + 1))\n # Now we move temp to permutations and clear out temp.\n permutations = temp\n temp = list()\n return permutations\n\n def clean_eval(exp):\n \"\"\" (int) Evaluate expression, ensuring we account for weirdness with\n leading zeros, etc. \"\"\"\n\n # Split expression using '+' as our split token\n number_string = exp.split(\"+\")\n total = int()\n\n # Cost each number string to int, cleaning up leading zeros, then total\n for num in number_string:\n total += int(num)\n\n return total\n\n # Create a dictionary of each permutations' sum and cost. Cost is defined as\n # the total number of additions necessary to generate the sum.\n value_costs = dict()\n\n # Iterate through permutations and populate values and their\n # respective costs into value_costs. If we find two equal values\n # with differing costs, use the lower cost.\n for perm in generate_permutations(numbers):\n value = clean_eval(perm[0])\n cost = perm[1]\n # Default the cost to 20 as an upper limit, given our number\n # will never cost more than 9 given 10 characters max.\n if value_costs.get(value, 20) > cost:\n value_costs[value] = cost\n\n return value_costs.get(num_sum, -1)",
"def main(n):\n return sum(f(i) for i in xrange(n))",
"def multiplesof3and5(to):\n sum = 0\n for i in range(1, to):\n if (i % 3 == 0) or (i % 5 == 0):\n sum += i\n return sum",
"def sum_n_m(n, m):\n total = 0\n for i in range(n, m+1):\n total += i\n return total",
"def one():\r\n \r\n i = 1\r\n sum = 0\r\n while i < 1000:\r\n if i % 3 == 0 or i % 5 == 0:\r\n sum = sum + i\r\n i = i + 1\r\n else:\r\n i = i + 1\r\n return sum",
"def solution(limit=28123):\n sum_divs = [1] * (limit + 1)\n\n for i in range(2, int(limit**0.5) + 1):\n sum_divs[i * i] += i\n for k in range(i + 1, limit // i + 1):\n sum_divs[k * i] += k + i\n\n abundants = set()\n res = 0\n\n for n in range(1, limit + 1):\n if sum_divs[n] > n:\n abundants.add(n)\n\n if not any((n - a in abundants) for a in abundants):\n res += n\n\n return res",
"def task_8_sum_of_ints(data: List[int]) -> int:\n return sum(data)",
"def get_solution():\n return jnp.sum(jnp.arange(10))",
"def sum_of_squares(n):\n result = i = 0\n while i < n:\n result += i\n i += 1\n return result",
"def euler2(N):\n i = 0\n current_term = 0\n res = 0\n while current_term <= N:\n res += current_term\n i += 1\n current_term = fib(3*i)\n return res",
"def squareOfSum(num):\n return sum(range(1, num + 1)) ** 2",
"def main():\n\n import sys\n sys.setrecursionlimit(10**7)\n from itertools import accumulate, combinations, permutations, product # https://docs.python.org/ja/3/library/itertools.html\n # accumulate() returns iterator! to get list: list(accumulate())\n from math import factorial, ceil, floor\n def factorize(n):\n \"\"\"return the factors of the Arg and count of each factor\n \n Args:\n n (long): number to be resolved into factors\n \n Returns:\n list of tuples: factorize(220) returns [(2, 2), (5, 1), (11, 1)]\n \"\"\"\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct\n def combinations_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n # TODO: How should I do when n - r is negative?\n if n < 0 or r < 0:\n raise Exception('combinations_count(n, r) not defined when n or r is negative')\n if n - r < r: r = n - r\n if r < 0: return 0\n if r == 0: return 1\n if r == 1: return n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n return result\n def combinations_with_replacement_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items allowing individual elements to be repeated more than once.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n if n < 0 or r < 0:\n raise Exception('combinations_with_replacement_count(n, r) not defined when n or r is negative')\n elif n == 0:\n return 1\n else:\n return combinations_count(n + r - 1, r)\n from bisect import bisect_left, bisect_right\n from collections import deque, Counter, defaultdict # https://docs.python.org/ja/3/library/collections.html#collections.deque\n from heapq import heapify, heappop, heappush, heappushpop, heapreplace,nlargest,nsmallest # https://docs.python.org/ja/3/library/heapq.html\n from copy import deepcopy, copy # https://docs.python.org/ja/3/library/copy.html\n from operator import itemgetter\n # ex1: List.sort(key=itemgetter(1))\n # ex2: sorted(tuples, key=itemgetter(1,2))\n from functools import reduce\n def chmin(x, y):\n \"\"\"change minimum\n if x > y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current minimum value\n y (long): potential minimum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x > y:\n x = y\n return (x, True)\n else:\n return (x, False)\n def chmax(x, y):\n \"\"\"change maximum\n if x < y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current maximum value\n y (long): potential maximum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x < y:\n x = y\n return (x, True)\n else:\n return (x, False)\n\n from fractions import gcd # Deprecated since version 3.5: Use math.gcd() instead.\n def gcds(numbers):\n return reduce(gcd, numbers)\n def lcm(x, 
y):\n return (x * y) // gcd(x, y)\n def lcms(numbers):\n return reduce(lcm, numbers, 1)\n\n # first create factorial_list\n # fac_list = mod_factorial_list(n)\n INF = 10 ** 18\n MOD = 10 ** 9 + 7\n modpow = lambda a, n, p = MOD: pow(a, n, p) # Recursive function in python is slow!\n def modinv(a, p = MOD):\n # evaluate reciprocal using Fermat's little theorem:\n # a**(p-1) is identical to 1 (mod p) when a and p is coprime\n return modpow(a, p-2, p)\n def modinv_list(n, p = MOD):\n if n <= 1:\n return [0,1][:n+1]\n else:\n inv_t = [0,1]\n for i in range(2, n+1):\n inv_t += [inv_t[p % i] * (p - int(p / i)) % p]\n return inv_t\n def modfactorial_list(n, p = MOD):\n if n == 0:\n return [1]\n else:\n l = [0] * (n+1)\n tmp = 1\n for i in range(1, n+1):\n tmp = tmp * i % p\n l[i] = tmp\n return l\n def modcomb(n, k, fac_list = [], p = MOD):\n # fac_list = modfactorial_list(100)\n # print(modcomb(100, 5, modfactorial_list(100)))\n from math import factorial\n if n < 0 or k < 0 or n < k: return 0\n if n == 0 or k == 0: return 1\n if len(fac_list) <= n:\n a = factorial(n) % p\n b = factorial(k) % p\n c = factorial(n-k) % p\n else:\n a = fac_list[n]\n b = fac_list[k]\n c = fac_list[n-k]\n return (a * modpow(b, p-2, p) * modpow(c, p-2, p)) % p\n def modadd(a, b, p = MOD):\n return (a + b) % MOD\n def modsub(a, b, p = MOD):\n return (a - b) % p\n def modmul(a, b, p = MOD):\n return ((a % p) * (b % p)) % p\n def moddiv(a, b, p = MOD):\n return modmul(a, modpow(b, p-2, p))\n\n \"\"\" initialize variables and set inputs\n # initialize variables\n # to initialize list, use [0] * n\n # to initialize two dimentional array, use [[0] * N for _ in range(N)]\n # set inputs\n # open(0).read() is a convenient method:\n # ex) n, m, *x = map(int, open(0).read().split())\n # min(x[::2]) - max(x[1::2])\n # ex2) *x, = map(int, open(0).read().split())\n # don't forget to add comma after *x if only one variable is used\n # preprocessing\n # transpose = [x for x in zip(*data)]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [(1, 4, 7), (2, 5, 8), (3, 6, 9)]\n # flat = [flatten for inner in data for flatten in inner]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # calculate and output\n # output pattern\n # ex1) print(*l) => when l = [2, 5, 6], printed 2 5 6\n \"\"\"\n\n # functions used\n r = lambda: sys.stdin.readline().strip()\n r_int = lambda: int(r())\n R = lambda: list(map(int, r().split()))\n Rfloat = lambda: list(map(float, r().split()))\n Rtuple = lambda: tuple(map(int, r().split()))\n Rmap = lambda: map(int, r().split())\n\n \"\"\" how to treat input\n # single int: int(r())\n # single string: r()\n # single float: float(r())\n # line int: R()\n # line string: r().split()\n # line (str, int, int): [j if i == 0 else int(j) for i, j in enumerate(r().split())]\n # lines int: [R() for _ in range(n)]\n \"\"\"\n\n # main\n N, Q = R()\n STX = [R() for _ in range(N)]\n STX.sort(key=itemgetter(2))\n\n D = [int(r()) for _ in range(Q)]\n Stopped = [-1] * Q\n ans = [-1] * Q\n\n for s, t, x in STX:\n l = bisect_left(D, s-x)\n r = bisect_left(D,t-x)\n a = l\n while a < r:\n if Stopped[a] == -1:\n ans[a] = x\n Stopped[a] = r\n a += 1\n else:\n a = Stopped[a]\n\n for i in ans:\n print(i)\n\n \"\"\"memo: how to use defaultdict of list\n # initialize\n Dic = defaultdict(list)\n # append / extend\n Dic[x].append(y)\n # for\n for k, v in Dic.items():\n \"\"\"",
"def p159(N):\n DR = compute_all_dr(10**6)\n\n def DRS(factors):\n \"\"\"Computes the DRS given a dictionary factorization.\"\"\"\n return sum([DR[n] * power for n, power in factors.items()])\n\n def compute_MDRS(n):\n \"\"\"Computes the MDRS from a number n using greedy local search.\"\"\"\n candidates = [] # Candidates with local optimal DRS\n\n def improve(factors):\n \"\"\"Recursively multiply two factors as long as it\n does not decrease the digital root sum of the factors.\"\"\"\n factors = defaultdict(int, factors)\n improvement = 0\n for a, b in find_pairs(factors):\n c = a*b\n # If local improvement is possible, create remove the two\n # old factors once and introduce a new one\n if DR[c] >= DR[a] + DR[b]:\n new_factors = factors.copy()\n new_factors[a] -= 1\n new_factors[b] -= 1\n new_factors[c] += 1\n improvement += 1\n improve(new_factors)\n\n # If no more local improvements are possible then add\n # to the pool of local optima as candidates for MDRS\n if not improvement:\n candidates.append(factors)\n\n # Compute the potential candidates\n improve(factorint(n))\n return max([DRS(f) for f in candidates])\n\n # Loop over all numbers.\n total = 0\n for n in range(2, N):\n total += compute_MDRS(n)\n return total",
"def sumSquareDiff():\n\n def getEachSqareRange(a,b):\n return a + b**2 \n def getTotalSquareRange(a,b):\n return a + b\n \n print(reduce(getTotalSquareRange,range(1,101)) ** 2 - reduce(getEachSqareRange,range(1,101)))",
"def sum_multiples(num):\n pass",
"def problem():\n for a in range(1, 380):\n for b in range(a):\n if a + b + (a**2 + b**2)**0.5 == 1000:\n return int(a * b * (a**2 + b**2)**0.5)",
"def square_of_sum(n):\n return ((n * (n+1)) / 2)**2",
"def sum_of_squares(n):\n return (n * (n+1) * (2*n + 1)) / 6",
"def non_abundant_sums():\n # the sum of divisors of every number\n divisor_sum = [0] * LIMIT\n for i in range(1, LIMIT):\n for j in range(i * 2, LIMIT, i):\n divisor_sum[j] += i\n # abundant numbers\n abundant_nums = [i for (i, x) in enumerate(divisor_sum) if x > i]\n\n expressible = [False] * LIMIT\n for i in abundant_nums:\n for j in abundant_nums:\n if i + j < LIMIT:\n expressible[i + j] = True\n else:\n break\n ans = sum(i for (i, x) in enumerate(expressible) if not x)\n return str(ans)",
"def find_three_sum(numbers, desired_sum=DEFAULT_SUM):\n for index, number in enumerate(numbers):\n # We calculate the number required to get to 2020.\n required_num = desired_sum - number\n\n for index2, number2 in enumerate(numbers[index:]):\n sum_of_num = number + number2\n required_num = desired_sum - sum_of_num\n\n if required_num < 0:\n continue\n\n # Check if the required number is in the list (this way we don't have to loop over all nums again &\n # try to see if it's the desired number!)\n if required_num in numbers and numbers.index(required_num) != index:\n print(f\"Numbers {number}, {number2}, and {required_num} together sum to {desired_sum}!\")\n\n return number * number2 * required_num",
"def sumRange(self, i, j):\n if not self.nums: return 0 # edge case\n return self.sum(j+1)-self.sum(i)",
"def find_the_sum(number):\n the_sum = 0\n\n for i in range(number):\n # The number is a multiple of 3 or 5\n # If the number is a multiple of both 3 and 5, it is counted once\n if (i % 3 == 0) or (i % 5 == 0):\n the_sum += i\n\n return the_sum",
"def fakultet (n = 1):\n sum = 1\n for i in range(n, 1, -1):\n sum *= i\n return sum",
"def summation_of_primes():\n \n k=1\n acc=2\n for x in range(2000000):\n if x!=0 and x%2!=0 and x%4!=0 and x%6!=0 and x%8!=0 and x%10!=0:\n k=1\n for m in range(x):\n if x!=1 and m!=0 and m!=1 and x%m==0 and x!=m:\n k=2\n if k==1 and x!=1 and x%2!=0 and x%4!=0: #and y!=2:\n acc=acc+x\n #print str(acc)+' THIS IS ACC\"\"\"\n print x\n return acc",
"def sum1():\n\txs = []\n\tfor i in range(100000):\n\t\tnum = joe.randrange(1000)\n\t\txs.append(num)\n\n\ttot = sum(xs)\n\treturn tot",
"def consecutiveNumbersSum(self, N):\n\n count = 0\n # nmax = int(-1 + sqrt(1+8*N)/2)\n # print(nmax)\n n = 1\n n2 = n*(n-1)/2 + n\n while n2 <= N:\n if (N-n2) % n == 0:\n # print(n)\n count += 1\n n += 1\n n2 = n*(n-1)/2 + n\n\n # Note N-(n2-n) % n == N-n2 % n\n return count",
"def sum_amnicable(limit):\n return sum(map(lambda num: num * is_amnicable(num), range(2, limit)))",
"def no_math_solution(n: int):\n # After 2 (the second fibonacci number), every third number is even\n if(n < 2):\n return 0\n fibs = [2, 3, 5]\n sum = 0\n while(fibs[0] <= n):\n sum += fibs[0]\n fibs[0] = fibs[1] + fibs[2]\n fibs[1] = fibs[0] + fibs[2]\n fibs[2] = fibs[0] + fibs[1]\n return sum",
"def question_26(list_num: int) -> int:\n return sum(list_num)",
"def summationReduce(lower, upper):\r\n if lower > upper:\r\n return 0\r\n else:\r\n return reduce(lambda x, y: x + y, range(lower, upper + 1))",
"def recursiveSums(desiredNum, values, depth=0, max_depth=5):\n depth+=1\n if(depth>max_depth):\n return\n if(len(values)==1):\n if(values[0]==desiredNum):\n return values[0]\n else:\n arr = []\n removals = []\n for i, value in enumerate(values):\n thisDesiredNum = desiredNum-value\n if(thisDesiredNum==0):\n arr.append(value)\n elif(thisDesiredNum>0):\n #quick fix prevents double counting here\n newValues = [l for l in values if(l not in removals)]\n newValues.pop(newValues.index(value))\n arr.append([value])\n if(len(newValues)!=0 and sum(newValues)>=thisDesiredNum):\n newSums = recursiveSums(thisDesiredNum, newValues, depth, max_depth)\n if(newSums):\n if(isinstance(newSums, int)):\n arr.append([newSums])\n else:\n arr[-1].extend(newSums)\n if(len(arr[-1])==0 or arr[-1]==[value]):\n arr.pop()\n removals.append(value)\n #remove unusable values\n iteratedValues = [value for value in values if(value not in removals)]\n if(iteratedValues):\n arr.append(recursiveSums(desiredNum, iteratedValues, depth, max_depth))\n return arr",
"def radical_sum(num, level):\n if level == 0:\n return 0\n \n return ((radical_sum(num, level - 1) + num) ** 0.5)",
"def problem2(m, p):\n total = 0\n for k in range(m, m ** p):\n if is_prime(k):\n total = total + sum_of_digits(k)\n return total",
"def consecutive_prime_sum():\n print \"testing consecutive_prime_sum\"\n sum=0\n for x in range(100000):\n if x!=1:\n if recursive_prime(x)=='prime':\n #print x\n sum = sum + x\n if sum > 1000000:\n sum = sum - x\n return sum\n #if x is prime, then add x to sum\n #check if sum is above 1 million\n #if sum is above 1 million, then subtract x from it and return that number",
"def three_sum(nums: [int]) -> [[int]]:\n check_validity(nums, 0, repetitive_numbers=True)\n if len(nums) <= 2 or not nums:\n return []\n nums.sort()\n result = []\n for i in range(len(nums)):\n left = i\n middle = left + 1\n right = len(nums) - 1\n while left < right and middle < right:\n first = nums[left]\n second = nums[middle]\n third = nums[right]\n if first + second + third > 0:\n right -= 1\n continue\n elif first + second + third == 0 and [first, second, third] not in result:\n result.append([first, second, third])\n middle += 1\n return result",
"def sum_square_difference(n):\n\tdifference = (n-1)*(n)*(n+1)*(3*n+2)/12\n\treturn difference",
"def problem10():\n total_sum = 0\n for x in xrange(1, 2000000):\n if is_prime(x):\n total_sum += x\n return total_sum",
"def sum_finder(nums, sum_wanted):\r\n\r\n for i, ni in enumerate(nums):\r\n\r\n for x, nx in enumerate(nums[i+1:]):\r\n\r\n if ni + nx == sum_wanted:\r\n print(\"Yes\", ni, \"and\", nx, \"=\", sum_wanted)\r\n else:\r\n print(ni, \"and\", nx, \"=\", \"No match\")",
"def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result",
"def total(n):\n if n < 0:\n return None\n else:\n result = 0\n for i in range(n + 1):\n result += i\n return result",
"def iterative_fuel(d):\n accumulator = d\n total = 0\n while True:\n accumulator = math.floor(accumulator / 3) - 2\n if accumulator < 0:\n return total\n total += accumulator",
"def summer_69(int_list):\n six_indexes = get_indexes_of(6, int_list) # [3,7]\n nine_indexes = get_indexes_of(9, int_list) # [5,9,10]\n nine_ranges = get_nine_ranges(six_indexes, len(int_list)) # [(3,(4,6)), (7,(8,inf))]\n ignored_sections = get_ignored_sections(nine_ranges, nine_indexes) # [(3,5), (7,9)]\n total = sum(int_list)\n for min_index, max_index in ignored_sections:\n total -= sum(int_list[min_index:max_index+1])\n return total",
"def question_30(x: int) -> int:\n # Base case below:\n if x == 0:\n return 1\n # Recursive function below:\n else:\n result = 1\n while x > 0:\n for i in range(x):\n result *= 2 ** question_30(i)\n x -= 1\n return result",
"def helper(nums_dict, S):\n if not nums_dict:\n return 1 if S == 0 else 0\n \n num = max(nums_dict.keys())\n num_count = nums_dict.pop(num)\n remaining_sum = sum([k*v for k, v in nums_dict.items()])\n #print(nums_dict, remaining_sum, S)\n \n ans = 0\n num_sum = -num*num_count\n for i in range(num_count+1):\n if -remaining_sum <= S - num_sum <= remaining_sum:\n ans += helper(nums_dict.copy(), S-num_sum) * comb(num_count, i)\n num_sum += 2*num\n return ans",
"def sum13(nums):\n if len(nums) == 0:\n return 0\n for i in range(0, len(nums)):\n if nums[i] == 13:\n nums[i] = 0\n if i + 1 < len(nums):\n nums[i + 1] = 0\n return sum(nums)",
"def coding_problem_42(numbers, target):\n if target == 0:\n return []\n\n valid_numbers = [n for n in numbers if 0 < n <= target]\n for number in sorted(valid_numbers, reverse=True):\n\n remaining_numbers = copy(valid_numbers)\n remaining_numbers.remove(number)\n partial_sum = coding_problem_42(remaining_numbers, target - number)\n if partial_sum is not None:\n return [number] + partial_sum\n\n return None",
"def sum_n_numbers():\n n=int(input(\"Enter a number:\"))\n s=0\n for i in range(n+1):\n s+=i\n print(\"sum=\",s)",
"def sum_divisibles(limit):\n res = [x for x in range(limit) if x % 3 == 0 or x % 5 == 0]\n return sum(res)",
"def minOperations(n):\n if type(n) is not int or n < 2:\n return 0\n\n summation = []\n\n while n % 2 == 0:\n summation.append(2)\n n = n // 2\n\n for i in range(3, n + 1, 2):\n while n % i == 0:\n summation.append(i)\n n = n // i\n\n return (sum(summation))",
"def binary_sums(start, limit):\n for n in range(start, limit):\n for i in range(1, n/2 + 1):\n yield i, n - i",
"def faster_brute_force_solution():\n def calc_b(a, sum):\n return ((sum**2/2) - (sum * a)) / (sum - a)\n\n a, b = next(\n (a, calc_b(a, TRIPLET_SUM))\n for a in range(1, TRIPLET_SUM)\n if calc_b(a, TRIPLET_SUM).is_integer()\n )\n\n return a * b * (TRIPLET_SUM - a - b)",
"def six():\r\n \r\n sum_squared = 0\r\n squared_sum = 0\r\n \r\n for i in range(1, 101):\r\n sum_squared += i**2\r\n squared_sum += i\r\n squared_sum = squared_sum**2\r\n \r\n return squared_sum - sum_squared",
"def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))",
"def twenty():\r\n \r\n n = 100\r\n factorial = 1\r\n sum = 0\r\n \r\n while n > 0:\r\n factorial *= n\r\n n -= 1\r\n \r\n for c in str(factorial):\r\n sum += int(c)\r\n \r\n return sum",
"def sum_of_numbers(numbers):\r\n return sum(numbers)",
"def multiplesof3and5optimized(to):\n return sum_of_divisible(to - 1, 5) - sum_of_divisible(to - 1, 15) + sum_of_divisible(to - 1, 3)",
"def sol(n):\n p = 1\n res = 0\n \n while n:\n p*=5\n if n&1:\n res+=p\n n=n>>1\n return res%1000000007",
"def dynamic_programming_solution():\n num_combos = [1] * (TOTAL + 1)\n\n # ignore 1p coin since we are initializing num_combos with that value\n for coin in COINS[1:]:\n for total in range(TOTAL + 1):\n # if coin value is greater than total then num_combos cannot change\n if coin <= total: \n num_combos[total] += num_combos[total - coin]\n\n return num_combos[TOTAL]",
"def problem2(limit):\n index = 6\n total = 2\n while True:\n fib_n = fib(index)\n if fib_n <= limit:\n total += fib_n\n index += 3\n else:\n break\n return total",
"def elementary_summand(fixed, i):\n if i < fixed:\n return 0\n elif i == fixed:\n return 2\n else:\n return 1",
"def mystery_1b_flat(n: int, rows_of_nums: list[list[int]]) -> int:\n if len(rows_of_nums) > 1 and n == 1:\n return 0\n elif len(rows_of_nums) > n > 0 and n in rows_of_nums[n]:\n return sum(rows_of_nums[n]) + n\n elif len(rows_of_nums) > n > 0:\n return sum(rows_of_nums[0])\n elif len(rows_of_nums) > 20:\n return 20\n else:\n return n",
"def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))",
"def sum_mult(number, limit):\n limit = limit - 1\n limit = (limit - (limit%number))/number\n return (number * limit * (limit+1))/2",
"def for_loop_sum_to_end(end):\n final_number = 0\n for numbers in range(0, int(end+1)):\n final_number += numbers\n\n print(final_number)",
"def evansPerfectNumbers(n):\n assert n>1\n perfect = []\n for i in range(1,n+1):\n sums = 0\n for j in range(1,i):\n sums += evansMod(i,j)*j\n if sums == i:\n perfect.append(i)\n #print(perfect) #for testing only\n return perfect",
"def interleaved_sum(n, odd_term, even_term):\n \"*** YOUR CODE HERE ***\"\n if n == 1:\n return 1\n f = odd_term\n if n % 2 == 0:\n f = even_term\n return f(n) + interleaved_sum(n - 1, odd_term, even_term)",
"def fn(x):\n if x <= 0: return int(x == 0)\n return sum(fn(x - xx) for xx in nums)"
] |
[
"0.7452489",
"0.69697046",
"0.6922565",
"0.6716069",
"0.6671347",
"0.66690093",
"0.66667116",
"0.66388094",
"0.66203743",
"0.66162395",
"0.6604934",
"0.6604606",
"0.64996886",
"0.648764",
"0.64773923",
"0.64563894",
"0.6424136",
"0.6397848",
"0.6307458",
"0.62973",
"0.6296701",
"0.626981",
"0.62603974",
"0.62603784",
"0.62457544",
"0.6236607",
"0.6232129",
"0.62314445",
"0.62196225",
"0.621105",
"0.6207595",
"0.61870426",
"0.61855423",
"0.6161921",
"0.6160083",
"0.6152405",
"0.6143922",
"0.6140491",
"0.61174613",
"0.61136454",
"0.6109945",
"0.61019415",
"0.6095278",
"0.6087745",
"0.60833055",
"0.6082441",
"0.6063174",
"0.60584253",
"0.60561395",
"0.60513514",
"0.6047285",
"0.60423726",
"0.6042162",
"0.60363543",
"0.6033213",
"0.602216",
"0.60170954",
"0.6013759",
"0.60024935",
"0.59946746",
"0.5988547",
"0.59776074",
"0.5968363",
"0.59681123",
"0.5965756",
"0.5964314",
"0.59641343",
"0.59633857",
"0.5960027",
"0.5957413",
"0.59538364",
"0.5953523",
"0.5953523",
"0.5952852",
"0.5944618",
"0.59420455",
"0.59353006",
"0.5930023",
"0.59286857",
"0.59279",
"0.5926742",
"0.5918991",
"0.5914976",
"0.59143",
"0.59135586",
"0.59123886",
"0.5910433",
"0.5909653",
"0.5903896",
"0.59038085",
"0.58958966",
"0.58926654",
"0.588957",
"0.58806795",
"0.5880011",
"0.5876381",
"0.58742744",
"0.58571994",
"0.58516276",
"0.5849245"
] |
0.69348073
|
2
|
Check the northbound queue for RPCs queued by GUI or SOAP requests. A client should connect when triggered by a CONNECTION_REQUEST, and any RPCs queued by the northbound will then be added to the session queue by this function.
|
def add_nb_queue_to_session_queue(self, session):
rpc_list = []
client_id = get_element('cid', session['client'])
if client_id is not None and client_id in RPCS.Northbound_Queue:
# Check if all commands have been serviced
if RPCS.Northbound_Queue[client_id]:
# Get first request in the client queue, in the form:
# (Client_COMMAND, RESPONSE STREAM)
# TODO pop might be unresolved
nb_request = RPCS.Northbound_Queue[client_id].pop(0)
# Parse and queue request(s)
client_command = nb_request[0]
rpc_list.append(client_command)
# Insert nb commands to the front of queue
                session['queue'] = rpc_list + session['queue']
# Store stream which expects the client response in the session
session['nb_response_stream'] = nb_request[1]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)",
"def _check_queue(self):\n self._process_incoming_queue_messages()\n self._root.after(200, self._check_queue)",
"def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True",
"def _check_comm_reply(self):\n if len(self._pending_comms) == 0:\n return\n for comm in self._pending_comms.values():\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(1, self._check_comm_reply)",
"def queue_communication(self, session):\n\n # Here we can queue all communication to be sent to the Client\n # Examples follow...\n session['queue'].append(GetObjects())\n session['queue'].append(DeleteObjects())\n session['queue'].append(RpcExecute())\n session['queue'].append(GetDeviceInfo())",
"def run(self, session):\n rpc = None\n if session['client']['event'] == 'CONNECTION_REQUEST':\n self.add_nb_queue_to_session_queue(session)\n\n while rpc is None and session['queue']:\n try:\n # Loop through queue until there is an RPC to send, or until\n # there are no more RPCs queued, or until an error occurs\n session['rpc']['method'] = session['queue'].pop(0)\n rpc = session['rpc']['method'].send_request(session)\n except ClientMethodException:\n # Failed to send this RPC, move on to the next\n LOG.debug(\"Error during preparation of client method: %s\" % str(session['rpc']['method']))\n continue\n except Exception:\n traceback.print_exc()\n LOG.debug(\"Unexpected error during preparation of client method: %s\" % str(session['rpc']['method']))\n return RPCS.SendingRpc, None\n\n if rpc is not None:\n # RPC ready: Send it and ExpectResponse\n return RPCS.ExpectResponse, rpc\n else:\n # If there are no (more) RPCs to send, log ok\n # and send done, indicating communication is complete\n session['log'] = {'rc': 'ok', 'msg': ''}\n session['db'].clear_dirtyflag(session['client']['cid'])\n return RPCS.Listening, {'method': 'done'}",
"def on_bindok(self, unused_frame):\n\n self.logger.info('queue bound')\n if self.acked:\n # if we wish to care about the servers replies, this is were we set up things\n self.logger.info('issuing confirm.select RPC')\n self._channel.confirm_delivery(self.on_delivery_confirmation)\n\n if self.sender:\n pass\n self.send()\n else:\n self.start_consuming()",
"def test_ipcrm_queues_not_isntalled(): # pragma: windows\n IPCComm.ipcrm_queues()",
"def on_bindok(self, unused_frame):\n logger.info('Queue bound')\n self.setup_error_queue()",
"def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False",
"def _accept_requests(self):\n try:\n request = self.request_queue.get(timeout=self.REQUESTS_TIMEOUT)\n self.logger.debug(\"Adding new requests\")\n for _ in xrange(self.REQUESTS_MAX_AMOUNT):\n self._requests.append(request)\n request = self.request_queue.get_nowait()\n\n except EmptyQueueError:\n return\n\n self.logger.debug(\"Done adding new requests\")",
"def on_queue(self):\n self.ws_opened.wait()\n\n while self.wsapp.keep_running:\n try:\n msg = self.shot_outbox.get(timeout=0.001)\n except:\n continue\n action = msg['action']\n payload = msg['payload']\n\n if action == 'remote':\n # Choose the remote server\n buf = json.dumps(payload)\n self.__ws_conn.send(f\"{buf}\\n\")\n elif action == 'local':\n # Choose the local server\n result = payload['params']['result']\n shot = payload['shot']\n prev = self.ret.get(result, 0)\n self.ret[result] = prev + 1\n\n del self.shot_threadings[shot]\n self.__shot_finished[shot] = True\n self.__bar.update(1)\n if all(self.__shot_finished):\n # All shots are completed\n self.failed = False\n self.wsapp.keep_running = False\n break",
"def check_session_queue_full(self) -> None:\n if (\n self.project.sessions_queued is None\n ): # no limit set so always return (success)\n return\n\n queued_request_count = self.project.session_requests.count()\n if queued_request_count >= self.project.sessions_queued:\n raise SessionException(\n \"There are already {}/{} requests for sessions for this project.\".format(\n queued_request_count, self.project.sessions_queued\n )\n )",
"def on_queue_declareok(self, method_frame):\n\n for queue in self._handlers.keys():\n LOGGER.debug('Binding %s to %s with %s',\n self.EXCHANGE, queue, self.ROUTING_KEY)\n self._channel.queue_bind(self.on_bindok, queue,\n self.EXCHANGE, self.ROUTING_KEY)",
"def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. 
It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure",
"def test_is_queued(self):\r\n\r\n answer_ids = sorted(self.problem.get_question_answers())\r\n\r\n # CodeResponse requires internal CorrectMap state. Build it now in the unqueued state\r\n cmap = CorrectMap()\r\n for answer_id in answer_ids:\r\n cmap.update(CorrectMap(answer_id=answer_id, queuestate=None))\r\n self.problem.correct_map.update(cmap)\r\n\r\n self.assertEquals(self.problem.is_queued(), False)\r\n\r\n # Now we queue the LCP\r\n cmap = CorrectMap()\r\n for i, answer_id in enumerate(answer_ids):\r\n queuestate = CodeResponseTest.make_queuestate(i, datetime.now(UTC))\r\n cmap.update(CorrectMap(answer_id=answer_ids[i], queuestate=queuestate))\r\n self.problem.correct_map.update(cmap)\r\n\r\n self.assertEquals(self.problem.is_queued(), True)",
"def check_queue():\n while cmd_source.poll(): # Pop off all the values.\n stage, operation, value = cmd_source.recv()\n if operation == 'kill':\n # Destroy everything\n window.destroy()\n # mainloop() will quit, this function will too, and\n # all our stuff will die.\n return\n elif operation == 'hide':\n window.withdraw()\n elif operation == 'show':\n window.deiconify()\n elif operation == 'value':\n stage_values[stage] = value\n set_nums(stage)\n elif operation == 'length':\n set_length(stage, value)\n elif operation == 'skip':\n skip_stage(stage)\n else:\n raise ValueError('Bad operation {!r}!'.format(operation))\n \n # Continually re-run this function in the TK loop.\n window.after_idle(check_queue)",
"def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0",
"async def _listen_on_orders(self):\n # The lock is used to make sure the websocket is setup before using it.\n await self._orders_sock_info.connected_event.wait()\n try:\n async for message in self._orders_sock_info.ws:\n self._orders_sock_info.ready.set()\n if self._orders_sock_info.queue.qsize() >= 100:\n log.warning(\"Websocket message queue is has \"\n f\"{self._orders_sock_info.queue.qsize()} pending \"\n \"messages.\")\n await self._orders_sock_info.queue.put(message)\n finally:\n await self._orders_sock_info.ws.close()",
"def check(self):\n\n print('Requester object is active: \\t', str(self.is_active))\n print('Number of requests sent: \\t', str(self.n_requests))\n print('Requester opened: \\t\\t', str(self.st_time))\n print('Requester closed: \\t\\t', str(self.en_time))",
"def _flash_queued_window(self) -> None:\n try:\n message = self.events.get(timeout=1)\n self.processing_event = True\n except Empty:\n return None\n\n try:\n self.router.route_request(message)\n except UnexpectedMessageType:\n logging.error(f\"Unexpected request type - {message.event_type}. Aborting...\")\n self.shutdown()\n except WMError:\n pass\n finally:\n self.processing_event = False",
"def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)",
"def is_call_waiting(self) -> bool:",
"def issuer_liveness_check():\n global app_config\n\n if app_config[\"running\"]:\n # return True until we get a shutdown request\n return True\n\n # return True until the work queue is cleared\n return tob_connection_active()",
"def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False",
"def is_incall_connected(self) -> bool:",
"def queue():\n\n # Check if the client is connected.\n if not session.is_connected():\n return dict(ok=False, error=\"Client not connected\")\n\n # Add the operation to the local queue.\n operation = manager.enqueue(session.get_sid())\n\n # If the local list of operation to solve is empty the Mmanager.enqueue`\n # method returns `None`, ence if the value of `operation` is `None`\n # the client should stop.\n return dict(ok=True, operation=operation, halt=operation is None)",
"def drain_call_queue(self):\n pass",
"def ctrlqueue_do_all_queue(self) -> int:\n try:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(11), ctypes.c_int32(0))\n except Exception as e:\n Base.warn_msg(\"An error occur when tried to *Do All Control Actions of CrlQueue* check if *Queue* is NOT \"\n \"empty\", e)",
"def on_queue_declareok(self, method_frame):\n self.logger.info('binding %s and %s together with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)",
"def call_q(self, _):\n return False",
"def call_q(self, _):\n return False",
"def call_q(self, _):\n return False",
"def check_queue(st):\n\n logging.info(\"Checking queue...\")\n check_time = time.time()\n n_waiting_jobs = BatchPlugin.poll_queue()\n\n if n_waiting_jobs is not None:\n\n # Correction factor\n corr = st['vms_allegedly_running'] * cf['elastiq']['n_jobs_per_vm']\n logging.info(\"Jobs: waiting=%d | allegedly running=%d | considering=%d\" % \\\n (n_waiting_jobs, corr, n_waiting_jobs-corr))\n n_waiting_jobs -= corr\n\n if n_waiting_jobs > cf['elastiq']['waiting_jobs_threshold']:\n if st['first_seen_above_threshold'] != -1:\n if (check_time-st['first_seen_above_threshold']) > cf['elastiq']['waiting_jobs_time_s']:\n # Above threshold time-wise and jobs-wise: do something\n logging.info(\"Waiting jobs: %d (above threshold of %d for more than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n list_ok = scale_up( math.ceil(n_waiting_jobs / float(cf['elastiq']['n_jobs_per_vm'])), valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n st['first_seen_above_threshold'] = -1\n else:\n # Above threshold but not for enough time\n logging.info(\"Waiting jobs: %d (still above threshold of %d for less than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n else:\n # First time seen above threshold\n logging.info(\"Waiting jobs: %d (first time above threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = check_time\n else:\n # Not above threshold: reset\n logging.info(\"Waiting jobs: %d (below threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = -1\n else:\n logging.error(\"Cannot get the number of waiting jobs this time, sorry\")\n\n return {\n 'action': 'check_queue',\n 'when': time.time() + cf['elastiq']['check_queue_every_s']\n }",
"def check_ack_queue(self):\r\n try:\r\n while True:\r\n ack = self.ack_queue.get_nowait()\r\n self.handle_ack(ack)\r\n except queue.Empty:\r\n pass",
"def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)",
"def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())",
"def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )",
"def __verify_queue_item(self, queue_item):\n\n browser = BrowserHelper.request(queue_item)\n return browser and len(browser.window_handles) >= 2",
"def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass",
"def _listen_to_requests(self):\n while True:\n try:\n request = self._client.recv(1024)\n except socket.error as err:\n if DEBUG_LEVEL >= 1:\n print \"Got socket error: {}\".format(err.message)\n self._client.close()\n return True\n\n if not request:\n if DEBUG_LEVEL >= 0:\n print \"Closing connection\"\n self._client.close()\n return True\n\n if DEBUG_LEVEL >= 2:\n print request\n\n if not HTTPValidation.validate_request(request):\n if DEBUG_LEVEL >= 0:\n print \"Invalid request, closing...\"\n self._client.send(public_response_functions.get_error_response())\n self._client.close()\n return True\n\n if not self._send_response(request):\n if DEBUG_LEVEL >= 0:\n print \"Closing connection...\"\n self._client.close()\n return",
"def _dispatch_from_client_request(self):\n # Listen for client connection\n self._from_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._from_client_request], [], [self._from_client_request], 0.1)\n\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n\n client_name_read, _, _ = select([client_conn], [], [client_conn])\n if client_name_read:\n client_name = json.loads(client_name_read[0].recv(cfg.HEADER).decode('utf-8'))\n else:\n print(\"Connection closed\")\n continue\n\n self._thread_lock.acquire()\n self._from_client_connections[client_conn] = client_name\n self._state[client_name] = 0\n self._thread_lock.release()\n\n print(\"Receiving commands from [\" + client_name + \", \" + client_addr[0] + \", \" + str(client_addr[1]) + ']')",
"def serviceQueries(self):\n #log.debug(f\"{self.name}: servicing queries for {len(self.connections)} connections.\")\n for ca, requester in self.connections.items():\n if requester.ims:\n log.info(\"Server %s received:\\n%s\\n\", self.name, requester.ims)\n requester.process()\n for label, msg in requester.process_results_iter():\n requester.send(msg, label=label)\n break # throttle just do one cue at a time\n\n if not requester.persistent: # not persistent so close and remove\n ix = self.server.ixes[ca]\n if not ix.txbs: # wait for outgoing txes to be empty\n self.closeConnection(ca)",
"def __remove_request_from_queue(self, sender):\n with self.__queue.mutex:\n for x in self.__queue.queue:\n if x[1] == sender:\n self.__queue.queue.remove(x)\n return True\n return False",
"def need_list():\n operation = request.args['operation']\n timestamp = int(time())\n id_session = request.remote_addr\n keys = ['operation', 'timestamp', 'id_session']\n values = [operation, timestamp, id_session]\n data = dict(zip(keys, values))\n msg = json.dumps(data)\n qm.send(cmdq, msg)\n return \"ok\"",
"def activate(self):\n self.socket.listen(self.request_queue_size)",
"def check_global_request(self, kind, msg):\n return False",
"def check_bcr_catchup(self):\n logger.debug(f\"Checking if BlockRequests has caught up {len(BC.Default().BlockRequests)}\")\n\n # test, perhaps there's some race condition between slow startup and throttle sync, otherwise blocks will never go down\n for peer in self.Peers: # type: NeoNode\n peer.stop_block_loop(cancel=False)\n peer.stop_peerinfo_loop(cancel=False)\n peer.stop_header_loop(cancel=False)\n\n if len(BC.Default().BlockRequests) > 0:\n for peer in self.Peers:\n peer.keep_alive()\n peer.health_check(HEARTBEAT_BLOCKS)\n peer_bcr_len = len(peer.myblockrequests)\n # if a peer has cleared its queue then reset heartbeat status to avoid timing out when resuming from \"check_bcr\" if there's 1 or more really slow peer(s)\n if peer_bcr_len == 0:\n peer.start_outstanding_data_request[HEARTBEAT_BLOCKS] = 0\n\n print(f\"{peer.prefix} request count: {peer_bcr_len}\")\n if peer_bcr_len == 1:\n next_hash = BC.Default().GetHeaderHash(self.CurrentBlockheight + 1)\n print(f\"{peer.prefix} {peer.myblockrequests} {next_hash}\")\n else:\n # we're done catching up. Stop own loop and restart peers\n self.stop_check_bcr_loop()\n self.check_bcr_loop = None\n logger.debug(\"BlockRequests have caught up...resuming sync\")\n for peer in self.Peers:\n peer.ProtocolReady() # this starts all loops again\n # give a little bit of time between startup of peers\n time.sleep(2)",
"def queryAllRequests(self):\n logging.info(\"Querying all requests at ReqMgr instance ...\")\n r = self.reqMgrService.getRequestNames()\n print \"Found %s requests:\" % len(r)\n for req in r:\n print req",
"def serviceRequests(self):\n for k in self.sessions:\n request = self.sessions[k].ongoingRequest\n client = self.sessions[k].clientInstance\n\n if request and request[0] == \"PULL\":\n filter, snapshot = self.fsicDiffAndSnapshot(request[2], request[3])\n self.queue(request[1], filter, snapshot)\n self.send(client, k, (\"DATA\", request[1], self.outgoingBuffer[request[1]]))\n del self.outgoingBuffer[request[1]]\n self.sessions[k].ongoingRequest = None\n\n elif request and request[0] == \"PUSH\":\n # Create a copy of your FSIC and sends it to client\n localFSIC = self.calcFSIC(request[2])\n # PUSH2 request : (\"PUSH2\", pushID, filter, localFSIC)\n self.send(client, k, (\"PUSH2\", request[1], request[2], localFSIC))\n self.sessions[k].ongoingRequest = None\n\n elif request and request[0] == \"PUSH2\":\n filter, snapshot = self.fsicDiffAndSnapshot(request[2], request[3])\n self.queue(request[1], filter, snapshot)\n self.send(self.sessions[k].serverInstance, k, (\"DATA\", request[1], self.outgoingBuffer[request[1]]))\n self.sessions[k].ongoingRequest = None\n\n elif request:\n raise ValueError('Invalid Request!')",
"def dispatch_frame(self, frame):\n if frame.command == 'RECEIPT':\n self.receipt_queue.put(frame)\n elif frame.command == 'MESSAGE':\n with self.subscription_lock:\n if frame.destination in self.subscribed_destinations:\n enqueue = True\n else:\n enqueue = False\n if self.debug:\n self.log.debug(\"Ignoring frame for unsubscribed destination: %s\" % frame)\n if enqueue:\n self.message_queue.put(frame)\n elif frame.command == 'ERROR':\n self.error_queue.put(frame)\n elif frame.command == 'CONNECTED':\n self.connected_queue.put(frame)\n else:\n self.log.info(\"Ignoring frame from server: %s\" % frame)",
"def on_queue_declareok(self, method_frame):\n # LOGGER.info('Binding %s to %s with %s',\n # self.EXCHANGE, self.QUEUE, self.ROUTING_KEY)\n # self._channel.queue_bind(self.on_bindok, self.QUEUE,\n # self.EXCHANGE, self.ROUTING_KEY)\n logger.info(\n \"[{}] Binding to {} with queue {} and routing key \\\"\\\"\".format(self.bot_id, self.exchange,\n self.queue_name))\n\n self._channel.queue_bind(self.on_bindok,\n queue=self.queue_name,\n exchange=self.exchange,\n routing_key=\"\")",
"def IsRequestFromAppSelf(self):\n # Requests from task queues or cron jobs are from app itself.\n return (self.request.headers.get('X-AppEngine-QueueName') or\n self.request.headers.get('X-AppEngine-Cron'))",
"def _dispatch_to_client_request(self):\n # Listen for client connection\n self._to_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._to_client_request], [], [self._to_client_request], 0.1)\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n self._to_client_connections.append(client_conn)\n print(\"Sending replies to [\" + client_addr[0] + \", \" + str(client_addr[1]) + ']')",
"def queueOn() -> None:\n\t\tLogging.enableQueue = Logging.queueSize > 0",
"def answer_waiting_call(self) -> None:",
"def fire_pending_jobs(self, mid: str) -> None:\n self.log.debug(\"Checking for pending jobs on {}\", mid)\n target = PDataContainer(id=mid, host=\"\") # TODO: get a proper target with the hostname\n if self.get_client_protocol(mid) is not None:\n for job in self.jobstore.get_scheduled(target):\n event = type(\"event\", (), {})\n event.jid = job.jid\n event.fun = job.uri\n event.arg = json.loads(job.args)\n threads.deferToThread(self.fire_event, event=event, target=target)",
"def queueStatusAll():",
"def acqstart(self):\n return 0",
"def check_existing(self):\n if self.btcd_container != None:\n self.btcd_container.reload()\n if self.btcd_container.status == \"running\":\n rpcconn, container = self.detect_bitcoind_container(\n self.rpcconn.rpcport\n )\n if container == self.btcd_container:\n return rpcconn\n raise Exception(\"Ambigious Container running\")\n return None",
"def query_thread_func(self):\n while True:\n # Receive and parse the query message.\n message = self.router_socket.recv_multipart()\n assert (len(message) == 3)\n assert not message[1]\n query = json.loads(message[2].decode('utf-8'))\n peer = message[0]\n assert (\"type\" in query)\n log.info(\n \"ACL Manager received packet %s from %s\",\n query, binascii.hexlify(peer)\n )\n\n if query[\"type\"] == \"GETACLSTATE\":\n endpoint = query[\"endpoint_id\"]\n log.info(\"Received query message %s from Felix\" % message)\n self.acl_store.query_endpoint_rules(endpoint)\n query[\"rc\"] = \"SUCCESS\"\n query[\"message\"] = \"\"\n else:\n # Received unexpected message. Log and return it.\n log.warning(\"Received query %s of unknown type\" % query)\n query[\"rc\"] = \"FAILURE\"\n query[\"message\"] = \"Unknown message type: expected GETACLSTATE\"\n\n log.debug(\"Sending response message: %s, %s\" %\n (peer, json.dumps(query).encode(\"utf-8\")))\n self.router_socket.send_multipart(\n (peer,\n \"\",\n json.dumps(query).encode(\"utf-8\"))\n )",
"def check_for_lock_request(self):\n while True:\n sleep(0.1)\n if self.get_state():\n sleep(5)\n self.lock()\n break",
"def poll(self):\n while self.running and reactor._started and not reactor._stopped:\n self.check_response_queue()\n sleep(0.5)",
"def _handle_pending(self):\r\n if not self.pending:\r\n self._post_message('')\r\n return\r\n info, desired = self.pending\r\n if desired and self.plugins[desired].busy:\r\n return\r\n self.busy = True\r\n\r\n if desired:\r\n plugins = [self.plugins[desired]]\r\n elif info.name == 'definition' and not info.editor.is_python():\r\n plugins = [p for p in self.plugins.values() if not p.busy]\r\n else:\r\n # use all but the fallback\r\n plugins = [p for p in list(self.plugins.values())[:-1] if not p.busy]\r\n\r\n self.request = RequestHandler(info, plugins)\r\n self.request.introspection_complete.connect(\r\n self._introspection_complete)\r\n self.pending = None",
"def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass",
"def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)",
"def check_connection(self):\n pass",
"def _handle_requests(self):\n for request in self._requests[:]:\n self.logger.debug(\"Handling request: %r\", request)\n\n # an orphan request, client is not alive.\n if not request.server_request and not request.worker.is_alive:\n self.logger.warning(\"Client %r disconnected, request dropped\",\n request.worker.name)\n self._requests.remove(request)\n continue\n\n try:\n request_handler = self._get_request_handler(request)\n reply = request_handler(request)\n\n except _WaitingForResourceException as ex:\n self.logger.exception(str(ex))\n continue\n\n except Exception as ex:\n if isinstance(ex, ServerError):\n code = ex.ERROR_CODE\n content = ex.get_error_content()\n\n else:\n code = ServerError.ERROR_CODE\n content = str(ex)\n\n self.logger.exception(str(ex))\n reply = ErrorReply(code=code, content=content)\n\n reply.request_id = request.message.msg_id\n self._reactor.callFromThread(request.respond, reply)\n\n self._requests.remove(request)",
"def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)",
"def check_queue_exists(self, queue_name):\n try:\n yield from self.queue_declare(queue_name, passive=True)\n except asyncio.exceptions.ChannelClosed:\n return False\n return True",
"def add_request_to_queue(self,request):\n self.queue.append(request)",
"def is_alive(self):\n try:\n stdout, stderr = self.run(0, \"rabbitmqctl\", \"list_queues\")\n for lines in stdout, stderr:\n for line in lines:\n if \"no_exists\" in line:\n return False\n return True\n except Exception:\n return False",
"def clients_done(self):\r\n if not self.client_list:\r\n return False\r\n elif len(asyncore.socket_map) > 1:\r\n return False\r\n else:\r\n return True",
"def process_clients():\n for client in state.CLIENT_LIST:\n if client.active and client.cmd_ready:\n logging.debug(\"Found a message, processing...\")\n msg_processor(client)",
"def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)",
"def any(self) -> bool:\n return len(self.queue) > 0",
"def is_queued(self):\n qstat = self._grep_qstat('queued')\n if qstat:\n return True\n return False",
"def check_connection(self):\n try:\n self.mfp.add(2,2)\n logger.info(\"Connection to user API established\")\n except xmlrpclib.ProtocolError:\n logger.error(\"Not possible to connect to MOF+. Check your credentials\")\n exit()\n return",
"def check_session_requests_exist(\n self, session_request_to_use: typing.Optional[SessionRequest]\n ) -> None:\n if session_request_to_use:\n session_request_to_use.delete() # consider this `SessionRequest` to be used up, so remove it\n return\n\n if self.session_requests_exist():\n # other users are waiting and the current user is not first in queue so queue them up\n raise ActiveSessionsExceededException(\n \"Unable to start session for the project as there are already requests queued\"\n )",
"def server_activate(self):\n\t\tself.socket.listen(self.request_queue_size)",
"def test_queue_not_installed(): # pragma: windows\n nt.assert_equal(IPCComm.get_queue(), None)",
"def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)",
"def WaitForRequest(self):\r\n selector = selectors.DefaultSelector()\r\n selector.register(self.listenSocket, selectors.EVENT_READ)\r\n while True:\r\n events = selector.select(timeout = 10)\r\n for __, __ in events:\r\n\r\n self.listenSocket.setblocking(True)\r\n sock, address = self.listenSocket.accept()\r\n self.listenSocket.setblocking(False) \r\n bgThread = threading.Thread(target=self.HandleRemoteCall, args=(sock, address))\r\n bgThread.start()",
"def setup_local_queries(self):\n #what='BANK,BRANCH,CUSTOMER,DEPOSIT'\n self.output(\"Sending initial Accounts query...\")\n what = '*'\n self.rtx_request(\n 'ACCOUNT_GATEWAY', 'ORDER', 'ACCOUNT', what, '', 'accounts', self.handle_accounts, self.accountdata_callbacks,\n self.callback_timeout['ACCOUNT'], self.handle_initial_account_failure\n )\n\n self.output(\"Sending initial Orders query...\")\n self.cxn_get('ACCOUNT_GATEWAY', 'ORDER').advise('ORDERS', '*', '', self.handle_order_update)\n\n self.rtx_request(\n 'ACCOUNT_GATEWAY', 'ORDER', 'ORDERS', '*', '', 'orders', self.handle_initial_orders_response,\n self.openorder_callbacks, self.callback_timeout['ORDERSTATUS'], self.handle_initial_orders_failure\n )\n\n self.output(\"Sending initial Executions query...\")\n execution_where = \"TYPE='ExchangeTradeOrder'\"\n self.cxn_get('ACCOUNT_GATEWAY', 'ORDER').advise('ORDERS', '*', execution_where, self.handle_execution_update)\n self.rtx_request(\n 'ACCOUNT_GATEWAY', 'ORDER', 'ORDERS', '*', execution_where, 'executions', self.handle_initial_executions_response,\n self.execution_callbacks, self.callback_timeout['ORDERSTATUS'], self.handle_initial_executions_failure\n )\n\n # on a reconnect, there may be symbols that need an advise\n for symbol in self.symbols.values():\n symbol.api_initial_request()\n\n self.initial_account_request_pending = True\n self.initial_order_request_pending = True\n self.initial_execution_request_pending = True\n self.initial_update_mapper_pending = True\n self.initialized = False",
"def test_solicitation_no_reply_resend(self):\n waittime = self.autoconflayer._solicitation_timeout * 4.0\n self.autoconflayer.start_process()\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure the forwarder solicitation was sent more than once\n solictiation = Interest(Name('/autoconfig/forwarders'))\n solictiation_count = len([1 for data in tolower if data == [bcfid, solictiation]])\n self.assertGreater(solictiation_count, 1)",
"def check(self):\n self.__check_request_limit()",
"def slot_client_connected(self, _sender, _data):\r\n self.check_connect_ready()",
"def listen_for_messages(self, callback):\n # generate get requests for all input queues\n requests = [port.in_queue.get() for port in self.ports]\n while requests:\n # helper variable for the asserts\n queues_with_pending_requests = [req.resource for req in requests]\n # There is a request for each input queue.\n assert set(self.input_queues) == set(queues_with_pending_requests)\n # For each input queue there's exactly one request.\n assert (\n len(queues_with_pending_requests) ==\n len(set(queues_with_pending_requests)))\n\n log.debug(\"{} waiting for next reception\".format(self))\n completed_requests = (yield self.env.any_of(requests))\n received_messages = list(completed_requests.values())\n log.debug(\"{} received {}\".format(\n self, received_messages))\n\n callback(received_messages)\n\n # Only leave the requests which have not been completed yet\n remaining_requests = [\n req for req in requests if req not in completed_requests]\n # Input queues that have been emptied since the last wake up.\n emptied_queues = [req.resource for req in completed_requests]\n # Add new get requests for the input queues that have been emptied.\n new_requests = []\n for input_queue in emptied_queues:\n new_requests.append(input_queue.get())\n requests = remaining_requests + new_requests",
"def validate(self):\n if Session.valid(self.__handler):\n SocketClients.add(Session.get_name(self.__handler), self.__handler)\n return True\n return False",
"def _handle_requests(self, _req_list):\n\n for req in _req_list:\n req_id_elements = req[\"request_id\"].split(\"-\", 1)\n opt = req_id_elements[0]\n req_id = req_id_elements[1]\n Logger.set_req_id(req_id)\n begin_time = datetime.now()\n\n req_body = json.loads(req[\"request\"])\n\n self.logger.debug(\"input request_type = \" + opt)\n self.logger.debug(\"request = \" + json.dumps(req_body, indent=4))\n\n # Check if the same request with prior request.\n (status, result) = self.ahandler.check_history(req[\"request_id\"])\n\n if result is None:\n if opt in (\"create\", \"delete\", \"update\", \"confirm\", \"rollback\"):\n app = self._handle_app(opt, req_id, req_body)\n\n if app is None:\n errstr = \"valet-engine exits due to \" + opt + \" error\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.error(errstr)\n return False\n\n if app.status == \"locked\":\n errstr = \"datacenter is being serviced by another valet\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.info(errstr)\n continue\n\n (status, result) = self._get_json_result(app)\n\n elif opt in (\"group_query\", \"group_create\"):\n # TODO(Gueyoung): group_delete and group_update\n\n (status, result) = self._handle_rule(opt, req_body)\n\n if result is None:\n errstr = \"valet-engine exits due to \" + opt + \" error\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.info(errstr)\n return False\n\n if status[\"status\"] == \"locked\":\n errstr = \"datacenter is locked by the other valet\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.info(errstr)\n continue\n\n elif opt == \"ping\":\n # To check if the local valet-engine is alive.\n\n if req_body[\"id\"] == self.valet_id:\n self.logger.debug(\"got ping\")\n\n status = {\"status\": \"ok\", \"message\": \"\"}\n result = {}\n else:\n continue\n\n else:\n status = {\"status\": \"failed\", \"message\": \"unknown operation = \" + opt}\n result = {}\n\n self.logger.error(status[\"message\"])\n\n else:\n self.logger.info(\"decision already made\")\n\n # Store final result in memory cache.\n if status[\"message\"] != \"timeout\":\n self.ahandler.record_history(req[\"request_id\"], status, result)\n\n # Return result\n if not self.dbh.return_request(req[\"request_id\"], status, result):\n return False\n\n self.logger.debug(\"output status = \" + json.dumps(status, indent=4))\n self.logger.debug(\" result = \" + json.dumps(result, indent=4))\n\n Logger.get_logger('audit').info(\"done request = \" + req[\"request_id\"], beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time)\n self.logger.info(\"done request = \" + req[\"request_id\"] + ' ----')\n\n # this should be handled by exceptions so we can log the audit correctly\n if self.lock.done_with_my_turn() is None:\n return False\n\n return True",
"def check_queue():\n while True:\n logging.info( 'Awaiting task ' )\n yield from asyncio.sleep( 5 )\n loop.create_task( (start_background_tasks()) )",
"def test_wait_for_reactor_thread(self):\n in_call_from_thread = []\n decorator = self.decorator()\n\n @decorator\n def func():\n in_call_from_thread.append(self.reactor.in_call_from_thread)\n\n in_call_from_thread.append(self.reactor.in_call_from_thread)\n func()\n in_call_from_thread.append(self.reactor.in_call_from_thread)\n self.assertEqual(in_call_from_thread, [False, True, False])",
"def test_ipcrm_queues():\n IPCComm.ipcrm_queues()\n nt.assert_equal(len(IPCComm.ipc_queues()), 0)\n mq = IPCComm.get_queue()\n nt.assert_equal(len(IPCComm.ipc_queues()), 1)\n IPCComm.ipcrm_queues(str(mq.key))\n nt.assert_equal(len(IPCComm.ipc_queues()), 0)",
"def correctness_message_sender(self):\n while self.run:\n try:\n msg = self.msg_out_queue.get(True, 1)\n\n except Empty:\n # self.logger.debug(\"Empty Queue\")\n continue\n\n if msg[0] in self.config.sdx_registry:\n next_sdx = self.config.sdx_registry[msg[0]]\n conn = Client((next_sdx.address, next_sdx.port))\n conn.send(json.dumps(msg[1]))\n conn.close()\n else:\n self.logger.debug(\"Error: SDX \" + str(msg[0]) + \" is not in the SDX registry\")",
"def _pending(self, connection):\r\n\r\n # calls the run (tick) starter operation that should start\r\n # and run all the starters registered for the connection or\r\n # continue any starter operation that is pending for it\r\n return connection.run_starter()",
"def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)",
"def testQueueSend(self):\n self.mgr.queueMsg(37)\n self.assertTrue( self.mgr.msgQueue.empty() )\n self.v.send_mavlink.assert_called_with(37)",
"def check_channel_exec_request(self, channel, command):\n return False",
"def curr_queue(self):\n pass",
"def send_and_check(self, pkt, rss_queue):\n self.tester.scapy_append('sendp(%s, iface=\"%s\")' % (pkt, self.tester_itf))\n self.tester.scapy_execute()\n time.sleep(2)\n queue = self.get_queue_number()\n self.verify(queue in rss_queue, \"the packet doesn't enter the expected RSS queue.\")\n return queue"
] |
[
"0.6413275",
"0.63343513",
"0.5929305",
"0.59155464",
"0.5869772",
"0.57600075",
"0.56298584",
"0.5565722",
"0.5554399",
"0.55359644",
"0.5532582",
"0.5485062",
"0.54757154",
"0.54550743",
"0.54375905",
"0.53650355",
"0.53436047",
"0.53087854",
"0.53064525",
"0.5301316",
"0.5291118",
"0.5267402",
"0.5251246",
"0.5250603",
"0.5235898",
"0.52276045",
"0.5218124",
"0.5213547",
"0.5213527",
"0.52091396",
"0.518911",
"0.518911",
"0.518911",
"0.51868486",
"0.51836264",
"0.518094",
"0.51801664",
"0.5162238",
"0.51569635",
"0.51486725",
"0.51422447",
"0.51412696",
"0.51340866",
"0.5131876",
"0.51143086",
"0.5112486",
"0.5111033",
"0.51076806",
"0.51009804",
"0.5098937",
"0.50853",
"0.5084933",
"0.5084652",
"0.50685245",
"0.50611526",
"0.50481457",
"0.5044546",
"0.50400424",
"0.50373185",
"0.5032612",
"0.5019598",
"0.5006794",
"0.50066453",
"0.4997444",
"0.49961892",
"0.49931797",
"0.49724254",
"0.49515572",
"0.49503827",
"0.49404466",
"0.4936255",
"0.49360663",
"0.492986",
"0.49252403",
"0.4917737",
"0.49104756",
"0.49103698",
"0.49050128",
"0.4903853",
"0.49024296",
"0.49013564",
"0.49005184",
"0.48981088",
"0.48980397",
"0.48952296",
"0.48946685",
"0.48942947",
"0.48819512",
"0.48770583",
"0.48728165",
"0.48696634",
"0.48684862",
"0.48668545",
"0.4849951",
"0.48468822",
"0.48401427",
"0.48372802",
"0.48351234",
"0.48314816",
"0.48308048"
] |
0.6457678
|
0
|
When NB requests are queued, put them in the session queue. If there are RPCs in the queue to be sent, return the prepared RPC and move to ExpectResponse. Otherwise, go to Listening and send a 'done' RPC.
|
def run(self, session):
rpc = None
if session['client']['event'] == 'CONNECTION_REQUEST':
self.add_nb_queue_to_session_queue(session)
while rpc is None and session['queue']:
try:
# Loop through queue until there is an RPC to send, or until
# there are no more RPCs queued, or until an error occurs
session['rpc']['method'] = session['queue'].pop(0)
rpc = session['rpc']['method'].send_request(session)
except ClientMethodException:
# Failed to send this RPC, move on to the next
LOG.debug("Error during preparation of client method: %s" % str(session['rpc']['method']))
continue
except Exception:
traceback.print_exc()
LOG.debug("Unexpected error during preparation of client method: %s" % str(session['rpc']['method']))
return RPCS.SendingRpc, None
if rpc is not None:
# RPC ready: Send it and ExpectResponse
return RPCS.ExpectResponse, rpc
else:
# If there are no (more) RPCs to send, log ok
# and send done, indicating communication is complete
session['log'] = {'rc': 'ok', 'msg': ''}
session['db'].clear_dirtyflag(session['client']['cid'])
return RPCS.Listening, {'method': 'done'}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_nb_queue_to_session_queue(self, session):\n rpc_list = []\n client_id = get_element('cid', session['client'])\n\n if client_id is not None and client_id in RPCS.Northbound_Queue:\n # Check if all commands have been serviced\n if RPCS.Northbound_Queue[client_id]:\n # Get first request in the client queue, in the form:\n # (Client_COMMAND, RESPONSE STREAM)\n # TODO pop might be unresolved\n nb_request = RPCS.Northbound_Queue[client_id].pop(0)\n # Parse and queue request(s)\n client_command = nb_request[0]\n rpc_list.append(client_command)\n # Insert nb commands to the front of queue\n session['queue'] = queued_nb_methods + session['queue']\n # Store stream which expects the client response in the session\n session['nb_response_stream'] = nb_request[1]",
"def answer_waiting_call(self) -> None:",
"def send_pending_requests(self):\n while self.pending_requests:\n stream_id = self.pending_requests.popleft()\n\n log.debug(\"initiating request, new stream %s\", stream_id)\n\n # send headers immediately rather than waiting for data. this ensures\n # streams are established with increasing stream ids regardless of when\n # the request data is available\n self.send_headers(stream_id, immediate=True)\n self.send_data(stream_id)",
"def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()",
"def _finish_pending_requests(self) -> None:\n while True:\n num_q, ok_list, err_list = self._multi.info_read()\n for curl in ok_list:\n self._finish(curl)\n for curl, errnum, errmsg in err_list:\n self._finish(curl, errnum, errmsg)\n if num_q == 0:\n break\n self._process_queue()",
"def sync_request(self, *args):\r\n seq = self.send_request(*args)\r\n while seq not in self.sync_replies:\r\n self.serve()\r\n return self.sync_replies.pop(seq)",
"def test_delivery_of_queued_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = str(randint(10, 99))\n localConfig.requeue_delay = 2\n localConfig.submit_sm_throughput = 20\n yield self.add(localConfig)\n\n # Send 150 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 150:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 20 seconds\n yield waitFor(20)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 30 seconds, all the rest of the queue must be sent\n yield waitFor(50)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(20)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 150)",
"def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. 
It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure",
"def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=6)\n\n self.request_buffer.process_reply(reply)\n self.assertEqual(len(self.request_buffer.requests), 5)",
"def testSendNextMessage(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(3)\n self.mgr.queueMsg(2)\n self.mgr.queueMsg(1)\n self.mgr.processMsgQueue()\n self.v.send_mavlink.assert_called_with(3)\n self.assertEqual( self.mgr.msgQueue.qsize(), 2)",
"def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()",
"def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()",
"def on_bindok(self, unused_frame):\n\n self.logger.info('queue bound')\n if self.acked:\n # if we wish to care about the servers replies, this is were we set up things\n self.logger.info('issuing confirm.select RPC')\n self._channel.confirm_delivery(self.on_delivery_confirmation)\n\n if self.sender:\n pass\n self.send()\n else:\n self.start_consuming()",
"def on_iteration(self):\n self.send_pending_requests()\n super().on_iteration()",
"def __call__(self):\n dv = None\n #Push as many queued calls as the self.max_batch_size and the max number of paralel HTTPS sessions allow for.\n while self.active_call_count < self.parallel and self.queue:\n #Get a chunk of entries from the command queue so we can make a batch.\n subqueue = self.queue[:self.max_batch_size]\n self.queue = self.queue[self.max_batch_size:]\n #Send a single batch to the currently selected RPC node.\n dv = self._process_batch(subqueue)\n #If there is nothing left to do, there is nothing left to do\n if not self.queue and self.active_call_count == 0:\n self.log.error(\"Queue is empty and no active HTTPS-POSTs remaining.\")\n if self.stop_when_empty:\n #On request, stop reactor when queue empty while no active queries remain.\n self.reactor.stop() \n return dv",
"def queue_communication(self, session):\n\n # Here we can queue all communication to be sent to the Client\n # Examples follow...\n session['queue'].append(GetObjects())\n session['queue'].append(DeleteObjects())\n session['queue'].append(RpcExecute())\n session['queue'].append(GetDeviceInfo())",
"async def handle_request():\n nonlocal process, process_task\n logger.debug(\"Waiting for request\")\n request = await queue.get()\n\n if request.name == RequestTypes.run_process:\n assert process is None, \"Process must not have been started\"\n process_state = request.contents\n process = self._start_callback(process_state)\n process_task = asyncio.create_task(process.wait())\n pid = process.pid\n logger.debug(\"Running process in handler: %d\", pid)\n await connection.send(Response(pid))\n\n elif request.name == RequestTypes.wait_process_done:\n assert process is not None, \"Process must have been started\"\n logger.debug(\"Waiting for process to exit\")\n # We don't want the process.wait() task to be cancelled in case\n # our connection gets broken.\n exitcode = await asyncio.shield(process_task)\n logger.debug(\"Result: %d\", exitcode)\n await connection.send(Response(exitcode))\n\n return True",
"def send_async_requests(self):\n\t\tif len(self._async_http_requests) <= 0:\n\t\t\treturn ()\n\n\t\tif self._session is None:\n\t\t\tself.start_new_session()\n\t\tsession = self._session\n\n\t\tresponses = [None] * len(self._async_http_requests)\n\t\t\":type : list\"\n\n\t\tfutures = []\n\t\tfor req, uri, host, auth, decode, ignored in self._async_http_requests:\n\t\t\tif host is None:\n\t\t\t\thost = self._host\n\t\t\t_log_http_request(req, uri, host, auth, self.log_full_request)\n\t\t\tf = self._async_executor.submit(session.send, req)\n\t\t\t# mini data-structure, Tuple[done_yet, future]\n\t\t\tfutures.append((False, f, decode, ignored))\n\t\tself._async_http_requests = []\n\n\t\t# now wait for them to complete\n\t\twhile len([x for x in futures if not x[0]]) > 0:\n\t\t\tnext_futures = []\n\t\t\tfor idx, f in enumerate(futures):\n\t\t\t\tdone_now = f[0]\n\t\t\t\tif not done_now:\n\t\t\t\t\tif f[1].done():\n\t\t\t\t\t\tr = f[1].result()\n\t\t\t\t\t\t_log_http_response(r, self.log_full_response)\n\t\t\t\t\t\tresponses[idx] = (r, f[2], f[3])\n\t\t\t\t\t\tdone_now = True\n\t\t\t\tnext_futures.append((done_now, f[1], f[2], f[3]))\n\t\t\tfutures = next_futures\n\t\t\ttime.sleep(0.01)\n\t\t# they are now done\n\n\t\t# we need to re-raise any exceptions that occur\n\t\tbad_responses = []\n\t\tfor idx, resp_items in enumerate(responses):\n\t\t\tresp, decode, ignored = resp_items\n\t\t\tif resp.status_code not in ignored:\n\t\t\t\ttry:\n\t\t\t\t\tresp.raise_for_status()\n\t\t\t\texcept requests.HTTPError as e:\n\t\t\t\t\t_log.exception(\"HTTPError in request #\" + str(idx) + \": \" + str(e))\n\t\t\t\t\tbad_responses.append(idx)\n\t\tif len(bad_responses) > 0:\n\t\t\tself._async_transforms = []\n\t\t\traise AsyncHTTPError(bad_responses)\n\n\t\t# finally, call the transform function on each one\n\t\ttransformed = []\n\t\tfor r_items, xform in zip(responses, self._async_transforms):\n\t\t\tr, decode, ignored = r_items\n\t\t\tdata = None\n\t\t\tif r.content is not None:\n\t\t\t\tif decode == 'text':\n\t\t\t\t\tdata = r.text\n\t\t\t\telif decode == 'json':\n\t\t\t\t\tdata = r.json(parse_float=decimal.Decimal)\n\t\t\t\telif decode == 'binary':\n\t\t\t\t\tdata = r.content\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Bad response_payload encoding: \" + decode)\n\t\t\t\tdata = xform(data)\n\t\t\ttransformed.append(data)\n\t\tself._async_transforms = []\n\t\treturn transformed",
"def _handle_pending(self):\r\n if not self.pending:\r\n self._post_message('')\r\n return\r\n info, desired = self.pending\r\n if desired and self.plugins[desired].busy:\r\n return\r\n self.busy = True\r\n\r\n if desired:\r\n plugins = [self.plugins[desired]]\r\n elif info.name == 'definition' and not info.editor.is_python():\r\n plugins = [p for p in self.plugins.values() if not p.busy]\r\n else:\r\n # use all but the fallback\r\n plugins = [p for p in list(self.plugins.values())[:-1] if not p.busy]\r\n\r\n self.request = RequestHandler(info, plugins)\r\n self.request.introspection_complete.connect(\r\n self._introspection_complete)\r\n self.pending = None",
"def user_next_command(self, tracer):\n try:\n self.queue.get_nowait()(tracer)\n except Empty:\n return",
"def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )",
"def testSessionTimeout(self):\n\n def testTimeout(res):\n self.failUnlessEqual(res.value.args[0], b'404')\n\n def testCBTimeout(res):\n # check for terminate if we expire\n terminate = res[0].getAttribute('type',False)\n self.failUnlessEqual(terminate, 'terminate')\n\n def sendTest():\n sd = self.send()\n sd.addCallback(testCBTimeout)\n sd.addErrback(testTimeout)\n return sd\n\n def testResend(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n s = self.b.service.sessions[self.sid]\n self.failUnless(s.inactivity==2,'Wrong inactivity value')\n self.failUnless(s.wait==2, 'Wrong wait value')\n return task.deferLater(reactor, s.wait+s.inactivity+1, sendTest)\n\n def testSessionCreate(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n self.failUnless(res[0].hasAttribute('sid'),'Not session id')\n self.sid = res[0]['sid']\n\n # send and wait\n sd = self.send()\n sd.addCallback(testResend)\n return sd\n\n\n\n BOSH_XML = \"\"\"<body content='text/xml; charset=utf-8'\n hold='1'\n rid='%(rid)i'\n to='localhost'\n route='xmpp:127.0.0.1:%(server_port)i'\n ver='1.6'\n wait='2'\n ack='1'\n inactivity='2'\n xml:lang='en'\n xmlns='http://jabber.org/protocol/httpbind'/>\n \"\"\"% { \"rid\": self.rid, \"server_port\": self.server_port }\n\n return self.proxy.connect(BOSH_XML).addCallbacks(testSessionCreate)",
"def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass",
"def fake_twisted_request(*args, **kwargs):\n kwargs.setdefault(\n 'Request', lambda channel: Request(channel=channel, queued=False))\n request = fake_nevow_request(*args, **kwargs)\n request.finish = lambda: next(request.finish.counter)\n request.finish.counter = itertools.count()\n return request",
"def async_request(self, callback, *args):\r\n seq = self.send_request(*args)\r\n self.async_replies[seq] = callback",
"def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)",
"def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)",
"def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass",
"def ProcessRequests(self, manager):\n self._CreateSpool()\n metrics_set = self._MetricsSet(\n *(constructor(self._METRIC_PREFIX + name)\n for name, constructor in self._METRICS_CONSTRUCTORS))\n pending_requests = []\n timestamps = {}\n tick_count = 0\n next_heartbeat = time.time()\n while True:\n tick_count += 1\n if time.time() >= next_heartbeat:\n next_heartbeat = time.time() + self._HEARTBEAT_INTERVAL\n logging.debug('Starting tick number %d', tick_count)\n manager.StartTick()\n\n num_completed = 0\n for request_id, result in manager.Reap():\n num_completed += 1\n metrics_set.total_completed.increment(fields={'status': 'normal'})\n time_running = time.time() - timestamps.pop(request_id)\n metrics_set.time_running.add(time_running)\n self._CompleteRequest(request_id, result)\n\n num_added = 0\n for request_id in self._GetNewRequests():\n num_added += 1\n metrics_set.total_received.increment()\n timestamps[request_id] = time.time()\n pending_requests.append(request_id)\n\n num_aborted = 0\n for abort_id in self._GetAbortRequests():\n num_aborted += 1\n metrics_set.total_completed.increment(fields={'status': 'abort'})\n if abort_id in timestamps:\n time_to_abort = time.time() - timestamps.pop(abort_id)\n metrics_set.time_to_abort.add(time_to_abort)\n self._ProcessAbort(abort_id, pending_requests, manager)\n\n num_started = 0\n while pending_requests and manager.HasCapacity():\n num_started += 1\n request_id = pending_requests.pop(0)\n time_now = time.time()\n time_waiting = time_now - timestamps[request_id]\n metrics_set.time_waiting.add(time_waiting)\n timestamps[request_id] = time_now\n self._StartRequest(request_id, manager)\n\n if num_completed or num_added or num_aborted or num_started:\n logging.info('new: %d, started: %d, aborted: %d, completed: %d',\n num_added, num_started, num_aborted, num_completed)\n num_pending = len(pending_requests)\n num_running = len(manager)\n logging.info('pending: %d, running: %d', num_pending, num_running)\n metrics_set.task_count.set(num_pending,\n fields={'state': 'pending'})\n metrics_set.task_count.set(num_running,\n fields={'state': 'running'})\n metrics_set.ticks.increment()\n time.sleep(manager.sample_interval)",
"def queue():\n\n # Check if the client is connected.\n if not session.is_connected():\n return dict(ok=False, error=\"Client not connected\")\n\n # Add the operation to the local queue.\n operation = manager.enqueue(session.get_sid())\n\n # If the local list of operation to solve is empty the Mmanager.enqueue`\n # method returns `None`, ence if the value of `operation` is `None`\n # the client should stop.\n return dict(ok=True, operation=operation, halt=operation is None)",
"def rpc_sendback(rpc_flag):\n credential = pika.PlainCredentials('guest', 'guest')\n rpc_connection = pika.BlockingConnection(pika.ConnectionParameters(\n host='localhost', port=5672, virtual_host='/', credentials=credential))\n rpc_channel = rpc_connection.channel()\n rpc_channel.queue_declare(queue=str(rpc_flag))\n #send message to the command center using basic_publish\n if rpc_flag == \"c02\":\n rpc_channel.basic_publish(exchange='', routing_key=str(\n rpc_flag), body='Drone has reached the delivery address')\n elif rpc_flag == \"c03\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has unloaded the item')\n elif rpc_flag == \"c04\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has reached the parking spot and available for next instruction')",
"def serviceRequests(self):\n for k in self.sessions:\n request = self.sessions[k].ongoingRequest\n client = self.sessions[k].clientInstance\n\n if request and request[0] == \"PULL\":\n filter, snapshot = self.fsicDiffAndSnapshot(request[2], request[3])\n self.queue(request[1], filter, snapshot)\n self.send(client, k, (\"DATA\", request[1], self.outgoingBuffer[request[1]]))\n del self.outgoingBuffer[request[1]]\n self.sessions[k].ongoingRequest = None\n\n elif request and request[0] == \"PUSH\":\n # Create a copy of your FSIC and sends it to client\n localFSIC = self.calcFSIC(request[2])\n # PUSH2 request : (\"PUSH2\", pushID, filter, localFSIC)\n self.send(client, k, (\"PUSH2\", request[1], request[2], localFSIC))\n self.sessions[k].ongoingRequest = None\n\n elif request and request[0] == \"PUSH2\":\n filter, snapshot = self.fsicDiffAndSnapshot(request[2], request[3])\n self.queue(request[1], filter, snapshot)\n self.send(self.sessions[k].serverInstance, k, (\"DATA\", request[1], self.outgoingBuffer[request[1]]))\n self.sessions[k].ongoingRequest = None\n\n elif request:\n raise ValueError('Invalid Request!')",
"def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)",
"def test_dispatch_all0(self):\n req1 = FakeRequest(1, False)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, False)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.dispatch_all()\n\n self.assertEqual(\n [True]*5,\n [req.dispatched for req in self.request_buffer.requests]\n )",
"def send_req(self):\n self.n_send_req += 1",
"def test_async_rpcmethod(node_factory, executor):\n l1 = node_factory.get_node(options={'plugin': os.path.join(os.getcwd(), 'tests/plugins/asynctest.py')})\n\n results = []\n for i in range(10):\n results.append(executor.submit(l1.rpc.asyncqueue))\n\n time.sleep(3)\n\n # None of these should have returned yet\n assert len([r for r in results if r.done()]) == 0\n\n # This last one triggers the release and all results should be 42,\n # since the last number is returned for all\n l1.rpc.asyncflush(42)\n\n assert [r.result() for r in results] == [42] * len(results)",
"def _accept_requests(self):\n try:\n request = self.request_queue.get(timeout=self.REQUESTS_TIMEOUT)\n self.logger.debug(\"Adding new requests\")\n for _ in xrange(self.REQUESTS_MAX_AMOUNT):\n self._requests.append(request)\n request = self.request_queue.get_nowait()\n\n except EmptyQueueError:\n return\n\n self.logger.debug(\"Done adding new requests\")",
"def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)",
"def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)",
"def handle_promise(self) -> None:\n self.state = 'ACCEPTED'\n for acceptor in self.sim.a:\n respond_m = Message(self, acceptor, 'ACCEPT', self.value, self.p_id, None)\n self.n.queue_message(respond_m)",
"def testQueueFlushQueue(self):\n self.mgr.sendState = Mock()\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(1)\n self.mgr.queueMsg(2)\n self.assertEqual(self.mgr.msgQueue.qsize(), 2)\n self.mgr.lastRequestSent = monotonic.monotonic() - 3.0\n self.mgr.queueMsg(3)\n self.assertTrue(self.mgr.msgQueue.empty)\n self.mgr.sendState.assert_called_with()",
"def _flash_queued_window(self) -> None:\n try:\n message = self.events.get(timeout=1)\n self.processing_event = True\n except Empty:\n return None\n\n try:\n self.router.route_request(message)\n except UnexpectedMessageType:\n logging.error(f\"Unexpected request type - {message.event_type}. Aborting...\")\n self.shutdown()\n except WMError:\n pass\n finally:\n self.processing_event = False",
"def work():\n with rq.Connection(create_connection()):\n worker = rq.Worker(list(map(rq.Queue, listen)))\n worker.work()",
"def process_AResponse(self) :\n while (1):\n str = self.recv(self.sock)\n if (len(str) > 0):\n response = amazon_pb2.AResponses()\n response.ParseFromString(str)\n print(response)\n # handle import new stock\n for arrive in response.arrived:\n things = arrive.things\n for thing in things:\n products = Whstock.objects.filter(pid = thing.id)\n if len(products) != 0:\n products[0].count = products[0].count + thing.count\n products[0].save()\n else :\n #need to specify world id\n whstock = Whstock()\n whstock.hid = arrive.whnum\n whstock.pid = thing.id\n whstock.dsc = thing.description\n whstock.count = thing.count\n whstock.save()\n # handle pack ready response\n #when ready send AU command to let UPS truck pickup,\n #use another thread for wait for UPS response\n #when receive response send ALoad command\n #when reveived loaded for Sim send AU command and let flag = 1;\n # tell UPS packages is ready and ask for trucks (provide destinaiton address)\n # tell warehouse to load when UPS trucks ready\n for currReady in response.ready:\n #save current state\n trans = Transaction.objects.get(ship_id = currReady)\n trans.ready = True\n trans.save()\n #connect to UPS\n ups_handler = threading.Thread(target=self.process_Uresponse, args=(trans,))\n ups_handler.start()\n self.AUCommand(trans, 0)\n print(\"first msg for UPS sent(to pickup)\")\n ups_handler.join()\n\n #load info from sim\n for load in response.loaded:\n #save current state\n trans = Transaction.objects.get(ship_id = load)\n trans.loaded = True\n trans.save()\n #connect to UPS\n self.AUCommand(trans, 1)\n print(\"second msg for UPS sent(get load success from sim world)\")",
"def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! fix it!",
"def _process_requests_in_background(self):\n while True:\n readable, writable, exceptional = self._bg_select_peers()\n\n for peer in readable:\n data = peer.socket.recv(RECV_BYTES)\n if data:\n peer.incoming_buffer.feed(data)\n try:\n response = peer.incoming_buffer.unpack()\n except msgpack.OutOfData:\n continue\n peer.handle_response(response)\n else:\n self._bg_clean_up_peer(peer)\n if peer in writable:\n writable.remove(peer)\n if peer in exceptional:\n exceptional.remove(peer)\n\n for peer in writable:\n # single-reader configuration means we can safely unlock between\n # peeking and committing.\n with peer.lock:\n next_bytes = peer.outgoing_buffer.peek(SEND_BYTES)\n if not next_bytes:\n continue\n\n sent_bytes = peer.socket.send(next_bytes)\n if sent_bytes == 0:\n self._bg_clean_up_peer(peer)\n if peer in exceptional:\n exceptional.remove(peer)\n continue\n\n with peer.lock:\n peer.outgoing_buffer.commit_read(sent_bytes)\n\n for peer in exceptional:\n self._bg_clean_up_peer(peer)",
"def test_handle_response_remove_request_from_pending(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotIn(uuid, lookup.pending_requests.keys())",
"def make_work_request(self):\n request = StoreRequest()\n self.bb_client.read_wait(request, self.handle_request)",
"def render(self, request):\n print self.rqs, request\n if self.rqs:\n self.rqs.write(\"x\")\n self.rqs.finish()\n self.rqs = None\n self.rqs = request\n return server.NOT_DONE_YET",
"def queueStatusAll():",
"def activate(self):\n self.socket.listen(self.request_queue_size)",
"def request() -> None:\n\t_flag.set()",
"def server_activate(self):\n\t\tself.socket.listen(self.request_queue_size)",
"def _process_batch(self, subqueue):\n try:\n timeoutCall = None\n jo = None\n if self.max_batch_size == 1:\n #At time of writing, the regular nodes have broken JSON-RPC batch handling.\n #So when max_batch_size is set to one, we assume we need to work around this fact.\n jo = json.dumps(self.entries[subqueue[0]]._get_rpc_call_object())\n else:\n #The api.steemitstage.com node properly supports JSON-RPC batches, and so, hopefully soon, will the other nodes.\n qarr = list()\n for num in subqueue:\n qarr.append(self.entries[num]._get_rpc_call_object())\n jo = json.dumps(qarr)\n url = \"https://\" + self.nodes[self.node_index] + \"/\"\n url = str.encode(url)\n deferred = self.agent.request('POST',\n url,\n Headers({\"User-Agent\" : ['Async Steem for Python v0.6.1'],\n \"Content-Type\": [\"application/json\"]}),\n _StringProducer(jo))\n def process_one_result(reply):\n \"\"\"Process a single response from an JSON-RPC command.\"\"\"\n try:\n if \"id\" in reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. {node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n def handle_response(response):\n \"\"\"Handle response for JSON-RPC batch query invocation.\"\"\"\n try:\n #Cancel any active timeout for this HTTPS call.\n if timeoutCall.active():\n timeoutCall.cancel()\n def cbBody(bodystring):\n \"\"\"Process response body for JSON-RPC batch query invocation.\"\"\"\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. 
{node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred2 = readBody(response)\n deferred2.addCallback(cbBody)\n return deferred2\n except Exception as ex:\n self.log.failure(\"Error in handle_response {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addCallback(handle_response)\n def _handle_error(error):\n \"\"\"Handle network level error for JSON-RPC request.\"\"\"\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addErrback(_handle_error)\n timeoutCall = self.reactor.callLater(self.rpc_timeout, deferred.cancel)\n #Keep track of the number of active parallel HTTPS posts.\n self.active_call_count = self.active_call_count + 1\n return deferred\n except Exception as ex:\n self.log.failure(\"Error in _process_batch {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])",
"def handle(self, name, request, response):\n self._acquire() # prevent other calls to protect data structs\n try:\n pool, requests, ready = self._lists\n # queue request\n requests.append((name, request, response))\n if ready:\n # If any threads are ready and waiting for work\n # then remove one of the locks from the ready pool\n # and release it, letting the waiting thread go forward\n # and consume the request\n ready.pop().release()\n\n finally:\n self._release()",
"def _send(self):\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n\n status_sent = False\n output_query_count = 0\n\n queues = []\n executor_keys = self.job.executor_id.split('-')\n for k in range(int(len(executor_keys)/2)):\n qname = 'lithops-{}'.format('-'.join(executor_keys[0:k*3+2]))\n queues.append(qname)\n\n while not status_sent and output_query_count < 5:\n output_query_count = output_query_count + 1\n try:\n with self._create_channel() as ch:\n for queue in queues:\n ch.basic_publish(exchange='', routing_key=queue, body=dmpd_response_status)\n logger.info(\"Execution status sent to RabbitMQ - Size: {}\".format(drs))\n status_sent = True\n except Exception:\n time.sleep(0.2)\n\n if self.status['type'] == '__end__':\n super()._send()",
"def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next",
"def process_request(t):\n time.sleep(t)",
"def process_request(t):\n time.sleep(t)",
"def process_request(t):\n time.sleep(t)",
"def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF",
"def putonqueue(self, nr, *args):\n self.outqueues[10-nr].put_nowait(args)\n self.tickqueue.put_nowait('go')",
"def on_bindok(self, unused_frame):\n logger.info('Queue bound')\n self.setup_error_queue()",
"def test_wait_for_dispatched_outbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_outbound(1, 'fooconn')\n self.assertNoResult(d)\n msg = msg_helper.make_outbound('message')\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.outbound', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])",
"def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))",
"def _pending(self, connection):\r\n\r\n # calls the run (tick) starter operation that should start\r\n # and run all the starters registered for the connection or\r\n # continue any starter operation that is pending for it\r\n return connection.run_starter()",
"def traceQueueContents(self):\n from typhon.objects.printers import toString\n debug_print(\"Pending queue for \" + self.name.encode(\"utf-8\"))\n for (resolver, target, atom, args, namedArgs) in self._pending:\n debug_print(toString(target).encode('utf-8') +\n \".\" + atom.verb.encode('utf-8') + \"(\" +\n ', '.join([toString(a).encode('utf-8')\n for a in args]) + \")\")",
"def enqueue_next_page_requests(fn):\n @wraps(fn)\n def inner(self, *args, **kwargs):\n logging.debug(\"[+] Queueing next page calls ..\")\n request_or_requests = fn(self, *args, **kwargs)\n if not request_or_requests:\n return\n\n response = self._get_response(args, kwargs)\n response_id = response.meta['__id']\n\n # Transform to list if it's a single request\n if not isinstance(request_or_requests, list):\n request_or_requests = [request_or_requests]\n\n self._request_registry[response_id]['nprs'] = request_or_requests\n\n return inner",
"def _queue_response(\n self,\n resp_address,\n req_id,\n status,\n message):\n\n incoming = self._incoming.get(resp_address)\n if not incoming:\n return\n\n data = incoming.packer.pack([req_id, status, message])\n try:\n resp_address.write(data)\n except pyuv.error.HandleClosedError:\n # This can happen if the client prematurely closes the\n # connection. Though the better thing to do would be to\n # detect this situation and call a call handler function\n # to take some action.\n pass",
"def run_sync_server(host, port, to_server_queue, from_server_queue):\n app = Flask(__name__)\n\n def shutdown_server():\n shutdown = request.environ.get(\"werkzeug.server.shutdown\")\n if shutdown is None:\n raise RuntimeError(\"Not running with the Werkzeug Server\")\n shutdown()\n\n @app.route(\"/server/status\", methods=[\"GET\"])\n def server_status():\n global mtu, status\n print(\"RECEIVED REQUEST /server/status\")\n\n msg = None if to_server_queue.empty() else to_server_queue.get()\n\n if msg:\n # Update global state\n mtu, status = msg[\"server_mtu\"], msg[\"server_status\"]\n\n if status == \"SHUTDOWN\":\n # App will shutdown after sending one last response\n shutdown_server()\n from_server_queue.put(\"SHUTDOWN\")\n elif status == \"INITIALIZED\":\n pass\n else:\n raise NotImplementedError()\n\n return jsonify({\"server_mtu\": mtu, \"server_status\": status})\n else:\n # Return current state\n return jsonify({\"server_mtu\": mtu, \"server_status\": status})\n\n @app.route(\"/peer/ready\", methods=[\"GET\"])\n def peer_ready():\n \"\"\"Peer is done with its cycle and is waiting for next cycle.\"\"\"\n global mtu, status\n print(\"RECEIVED REQUEST /peer/ready\")\n status = \"NOT_INITIALIZED\"\n\n from_server_queue.put(\"INITIALIZE\")\n return jsonify({\"server_mtu\": mtu, \"server_status\": status})\n\n # @app.route(\"/server/shutdown\", methods=[\"GET\"])\n # def server_restart():\n # from_server_queue.put(\"shutdown\")\n # shutdown_server()\n # return jsonify(jsonify({\"server_mtu\": mtu, \"server_status\": \"shutdown\"}))\n\n # Blocking call until the server received a GET request on /server/shutdown after which\n # the flask server is shutdown\n app.run(host=host, port=port)",
"def _http_thread_func(self):\r\n while not self._terminating:\r\n # pop queued request from the queue and process it\r\n (api_endpoint, params, reqid) = self.http_requests.get(True)\r\n translated = None\r\n try:\r\n answer = self.http_signed_call(api_endpoint, params)\r\n if answer[\"result\"] == \"success\":\r\n # the following will reformat the answer in such a way\r\n # that we can pass it directly to signal_recv()\r\n # as if it had come directly from the websocket\r\n translated = {\r\n \"op\": \"result\",\r\n \"result\": answer[\"data\"],\r\n \"id\": reqid\r\n }\r\n else:\r\n if \"error\" in answer:\r\n if answer[\"token\"] == \"unknown_error\":\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n else:\r\n\r\n # these are errors like \"Order amount is too low\"\r\n # or \"Order not found\" and the like, we send them\r\n # to signal_recv() as if they had come from the\r\n # streaming API beause Gox() can handle these errors.\r\n translated = {\r\n \"op\": \"remark\",\r\n \"success\": False,\r\n \"message\": answer[\"error\"],\r\n \"token\": answer[\"token\"],\r\n \"id\": reqid\r\n }\r\n\r\n else:\r\n self.debug(\"### unexpected http result:\", answer, reqid)\r\n\r\n except Exception as exc:\r\n # should this ever happen? HTTP 5xx wont trigger this,\r\n # something else must have gone wrong, a totally malformed\r\n # reply or something else.\r\n #\r\n # After some time of testing during times of heavy\r\n # volatility it appears that this happens mostly when\r\n # there is heavy load on their servers. Resubmitting\r\n # the API call will then eventally succeed.\r\n self.debug(\"### exception in _http_thread_func:\",\r\n exc, api_endpoint, params, reqid)\r\n\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n\r\n if translated:\r\n self.signal_recv(self, (json.dumps(translated)))\r\n\r\n self.http_requests.task_done()",
"def putonqueue(self, nr, *args):\n\n self.outqueues[nr].put_nowait(*args)\n self.tickqueue.put_nowait('go')",
"def bind_queue(self):\n # pylint: disable=protected-access\n future = self._backend._create_future()\n\n def on_bindok(unused_frame):\n future.set_result(True)\n\n self.log.debug('Bind queue exchange=%s, routing_key=%s',\n self.exchange, self.routing_key)\n self._channel.queue_bind(on_bindok, self.name,\n self.exchange, self.routing_key)\n\n return future",
"def BeginSession( self ) : \r\n\r\n ## self._connection.write( 'Q%s' % ( systemSpec, ) ) \r\n ## assert self.GetServerResponse() == True # \" the quick-&-dirty way \" // to-do: create an own exception \r\n\r\n message = self._fmt.pack( 'Q', self._system_spec )\r\n self._socket.write( message ) \r\n\r\n # debug\r\n print \"BS: \", message \r\n\r\n return self.GetServerResponse()",
"def on_queue(self):\n self.ws_opened.wait()\n\n while self.wsapp.keep_running:\n try:\n msg = self.shot_outbox.get(timeout=0.001)\n except:\n continue\n action = msg['action']\n payload = msg['payload']\n\n if action == 'remote':\n # Choose the remote server\n buf = json.dumps(payload)\n self.__ws_conn.send(f\"{buf}\\n\")\n elif action == 'local':\n # Choose the local server\n result = payload['params']['result']\n shot = payload['shot']\n prev = self.ret.get(result, 0)\n self.ret[result] = prev + 1\n\n del self.shot_threadings[shot]\n self.__shot_finished[shot] = True\n self.__bar.update(1)\n if all(self.__shot_finished):\n # All shots are completed\n self.failed = False\n self.wsapp.keep_running = False\n break",
"def process(self):\n if not self._requests:\n return\n\n self._processing = True\n Engine.instance().start()",
"def ready(self, all_ok, message):\r\n assert self.status == WAIT_PROCESS\r\n if not all_ok:\r\n self.close()\r\n self.wake_up()\r\n return\r\n self.len = ''\r\n if len(message) == 0:\r\n # it was a oneway request, do not write answer\r\n self.message = ''\r\n self.status = WAIT_LEN\r\n else:\r\n self.message = struct.pack('!i', len(message)) + message\r\n self.status = SEND_ANSWER\r\n self.wake_up()",
"def start(self):\n if self._real_send:\n raise RuntimeError('Mocker has already been started')\n\n self._real_send = requests.Session.send\n\n def _fake_get_adapter(session, url):\n return self._adapter\n\n def _fake_send(session, request, **kwargs):\n real_get_adapter = requests.Session.get_adapter\n requests.Session.get_adapter = _fake_get_adapter\n\n try:\n return self._real_send(session, request, **kwargs)\n except exceptions.NoMockAddress:\n if not self._real_http:\n raise\n finally:\n requests.Session.get_adapter = real_get_adapter\n\n return self._real_send(session, request, **kwargs)\n\n requests.Session.send = _fake_send",
"def testQueueSend(self):\n self.mgr.queueMsg(37)\n self.assertTrue( self.mgr.msgQueue.empty() )\n self.v.send_mavlink.assert_called_with(37)",
"def start_finish(self):\r\n self.send_queue.put(('finish',))",
"def handle(self, sim_manager, state):\n super().handle(sim_manager, state)\n state.queue_job(self.job_index)\n if state.has_free_view_slots():\n # todo: revisit this...\n self.trigger_next = False\n job_idx = state.get_next_job()\n sim_manager.start_new_job(job_idx, state)\n sim_manager.event_heap.update_event_heap_counts('job_arrivals', False)",
"def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n except:\n print \"Cannot send request. Retrying in 5 seconds\"\n print_exception(*sys.exc_info())\n print \"continuing...\"\n self.enqueue(body)\n sleep(5)",
"def test_wait_for_dispatched_inbound(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_inbound(1, 'fooconn')\n self.assertNoResult(d)\n msg = msg_helper.make_inbound('message')\n yield self._add_to_dispatched(\n worker_helper.broker, 'fooconn.inbound', msg, kick=True)\n dispatched = success_result_of(d)\n self.assertEqual(dispatched, [msg])",
"def _check_comm_reply(self):\n if len(self._pending_comms) == 0:\n return\n for comm in self._pending_comms.values():\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(1, self._check_comm_reply)",
"def test_solicitation_no_reply_resend(self):\n waittime = self.autoconflayer._solicitation_timeout * 4.0\n self.autoconflayer.start_process()\n interest = Interest(Name('/foo/bar'))\n self.queue_from_higher.put([None, interest])\n\n # Catch all data the autoconfig layer sends downwards for 3 seconds\n deadline = datetime.utcnow() + timedelta(seconds=waittime)\n tolower = []\n while datetime.utcnow() < deadline:\n try:\n data = self.queue_to_lower.get(timeout=waittime/10)\n tolower.append(data)\n except queue.Empty:\n pass\n # Make sure the broadcast face was actually created and get its face id\n bcfid = self.faceidtable.get_or_create_faceid(AddressInfo(('127.255.255.255', 4242), 0))\n self.assertIsNotNone(bcfid)\n # Make sure the forwarder solicitation was sent more than once\n solictiation = Interest(Name('/autoconfig/forwarders'))\n solictiation_count = len([1 for data in tolower if data == [bcfid, solictiation]])\n self.assertGreater(solictiation_count, 1)",
"def test_bufferedServerStatus(self):\n self.server.dataReceived(\n b'01 FETCH 1,2 BODY[]\\r\\n'\n )\n\n # Two iterations yields the untagged response and the first\n # fetched message's body\n twice = functools.partial(next, iter([True, True, False]))\n self.flushPending(asLongAs=twice)\n\n self.assertEqual(\n self.transport.value(), b''.join([\n # The untagged response...\n b'* 1 FETCH (BODY[] )\\r\\n',\n # ...and its body\n networkString(\n '{5}\\r\\n\\r\\n\\r\\n%s' % (\n nativeString(self.messages[0].getBodyFile().read()),\n )\n ),\n ]))\n\n self.transport.clear()\n\n # A server status change...\n self.server.modeChanged(writeable=True)\n\n # ...remains buffered...\n self.assertFalse(self.transport.value())\n\n self.flushPending()\n\n self.assertEqual(self.transport.value(), b''.join([\n # The untagged response...\n b'* 2 FETCH (BODY[] )\\r\\n',\n # ...the status change...\n b\"* [READ-WRITE]\\r\\n\",\n # ...and the completion status and final message's body\n networkString(\n '01 OK FETCH completed\\r\\n{5}\\r\\n\\r\\n\\r\\n%s' % (\n nativeString(self.messages[1].getBodyFile().read()),\n )\n ),\n ]))",
"def cb_test( self, ):\r\n # this shows how to run stuff in the helper -- call thru queue, post to queue\r\n self.post_to_queue( \"call\", self.helper_thread.test_test_ports , ( ) )",
"def _check_queue(self):\n self._process_incoming_queue_messages()\n self._root.after(200, self._check_queue)",
"def complete():\n\n # Check if the client is connected.\n if not session.is_connected():\n return dict(ok=False, error=\"Client not connected\")\n\n # Extract the result ftom the request body.\n body = request.json or {}\n result = body.get(\"result\")\n operation = body.get(\"operation\")\n\n # Mark the operation as solved.\n if manager.solve(session.get_sid(), operation, result):\n return dict(ok=True)\n\n return dict(ok=False, error=\"Operation not queued by this client\")",
"def _resend_subscriptions_and_strategies(self):\n for req in self._ws_jsonrpc_cache:\n self._logger.info('Resending JSONRPCRequest %s', req)\n result = yield self._send(req)\n self._logger.info('Resent JSONRPCRequest, with result: %s', result)",
"def test_dispatch_all1(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n self.request_buffer.dispatch_all()\n\n self.assertTrue(\n req1 not in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 not in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 in self.request_buffer.requests\n )",
"def console_request(self, evt, proto):\n if evt.kind == sugar.transport.ServerMsgFactory.TASK_RESPONSE:\n threads.deferToThread(self.on_broadcast_tasks, evt, proto)",
"def start_session():\n current_f_name = inspect.currentframe().f_code.co_name\n\n logger.info('{}: Start_session called'.format(current_f_name))\n\n if not pipong_is_pinger():\n return jsonify({\n 'result': 'failure',\n 'msg': 'this server is not a pinger'\n })\n\n response = {'result': 'success'}\n data = request.get_json()\n logger.info(data)\n\n try:\n host_list = data['hosts']\n remote_id = data['master_iteration_id']\n tracert_qty = data['tracert_qty']\n ip_addr = request.remote_addr\n\n exists = db.session.query(\n db.session.query(models.PingerIteration).filter_by(\n remote_id=str(remote_id)).exists()).scalar()\n\n if not exists:\n s = db.session()\n iter_t = models.PingerIteration(\n status=\"CREATED\",\n remote_id=str(remote_id),\n remote_address=ip_addr,\n tracert_qty=tracert_qty)\n s.add(iter_t)\n s.flush()\n\n for k, v in host_list.items():\n api_port = v['api_port']\n api_protocol = v['api_protocol']\n ponger_t = models.Ponger(\n address=k,\n pinger_iteration_id=iter_t.id,\n api_port=api_port,\n api_protocol=api_protocol)\n s.add(ponger_t)\n s.flush()\n\n s.commit()\n\n logger.info('{}: New pinger iteration ID:{}'.format(\n current_f_name, iter_t.id))\n\n tasks.pinger_tasks.perform_pipong_iteration_1.apply_async(\n args=[iter_t.id], kwargs={})\n response['ping_iteration_id'] = iter_t.id\n else:\n logger.error(\n '{}: Remote id already registered'.format(current_f_name))\n return jsonify({\n 'result': 'failure',\n 'msg': 'remote id already registered'\n })\n\n logger.info('{}: port_list:{} ip_addr:{} exists:{}'.format(\n current_f_name, host_list, ip_addr, exists))\n except Exception:\n exception_log = traceback.format_exc()\n logger.debug('{}: e:{}'.format(current_f_name, exception_log))\n jsonify({'result': 'failure', 'msg': exception_log})\n\n return jsonify(response)",
"def poll(self):\n while self.running and reactor._started and not reactor._stopped:\n self.check_response_queue()\n sleep(0.5)",
"def test_wait(nsproxy):\n server = run_agent('server', base=ServerLate)\n client = run_agent('client', base=Client)\n logger = run_logger('logger')\n client.set_logger(logger)\n sync_agent_logger(client, logger)\n\n # Connect clients\n server_addr = server.addr('publish')\n client.connect(server_addr, alias='sub', handler=append_received)\n\n # Publish from server\n server.each(0, 'publish')\n\n # Wait for client to receive some data\n N = 10\n assert wait_agent_attr(client, length=N)\n\n # Response received in time\n fast = 0\n client.send('sub', fast, handler=append_received, wait=0.5)\n time.sleep(0.2)\n assert server.get_attr('received') == [fast]\n assert 'x' + str(fast) in client.get_attr('received')\n\n # Response not received in time\n slow = 1\n client.send('sub', slow, handler=append_received, wait=0.1)\n assert logger_received(logger,\n log_name='log_history_warning',\n message='not receive req',\n timeout=0.5)\n assert server.get_attr('received') == [fast, slow]\n assert 'x' + str(slow) not in client.get_attr('received')\n\n # Response not received in time with error handler\n slow = 1\n client.send('sub', slow, handler=append_received, wait=0.1,\n on_error=on_error)\n assert wait_agent_attr(client, name='error_log', length=1, timeout=0.5)\n assert server.get_attr('received') == [fast, slow]\n assert 'x' + str(slow) not in client.get_attr('received')",
"def startingNewStep(self):\n with self.__queueLock:\n self.__submittedJobs = []",
"def process_pending_commands(self):\n\n while self.local_commands:\n command = self.local_commands.popleft()\n\n if isinstance(command, Command):\n if self.cfg['VoipIO']['debug']:\n self.cfg['Logging']['system_logger'].debug(command)\n\n if command.parsed['__name__'] == 'stop':\n # discard all data in play buffer\n while self.audio_play.poll():\n data_play = self.audio_play.recv()\n\n return True\n\n if command.parsed['__name__'] == 'flush':\n # discard all data in play buffer\n while self.audio_play.poll():\n data_play = self.audio_play.recv()\n\n self.local_audio_play.clear()\n self.mem_player.flush()\n self.audio_playing = False\n\n # flush the recorded data\n while self.mem_capture.get_read_available():\n data_rec = self.mem_capture.get_frame()\n #self.mem_capture.flush()\n self.audio_recording = False\n\n self.commands.send(Command(\"flushed()\", 'VoipIO', 'HUB'))\n\n return False\n\n if command.parsed['__name__'] == 'flush_out':\n # discard all data in play buffer\n while self.audio_play.poll():\n data_play = self.audio_play.recv()\n\n self.local_audio_play.clear()\n self.mem_player.flush()\n self.audio_playing = False\n\n self.commands.send(Command(\"flushed_out()\", 'VoipIO', 'HUB'))\n \n return False\n\n if command.parsed['__name__'] == 'make_call':\n # make a call to the passed destination\n self.make_call(command.parsed['destination'])\n return False\n\n if command.parsed['__name__'] == 'transfer':\n # transfer the current call to the passed destination\n self.transfer(command.parsed['destination'])\n\n return False\n\n if command.parsed['__name__'] == 'hangup':\n # hangup the current call\n self.hangup()\n\n return False\n\n if command.parsed['__name__'] == 'black_list':\n # black list the passed remote uri, VoipIO will not accept any\n # calls until the current time will be higher then the expire variable\n remote_uri = command.parsed['remote_uri']\n expire = int(command.parsed['expire'])\n\n self.black_list[remote_uri] = expire\n\n return False\n\n raise VoipIOException('Unsupported command: ' + command)\n\n return False",
"def _exec_cmd_nb(self, cmd):\n job.run_async(cmd)",
"def test_wait(nsproxy):\n server = run_agent('server', base=ServerLate)\n client = run_agent('client', base=Client)\n logger = run_logger('logger')\n client.set_logger(logger)\n sync_agent_logger(client, logger)\n\n # Connect clients\n server_addr = server.addr('publish')\n client.connect(server_addr, alias='sub', handler=append_received)\n\n # Publish from server\n server.each(0, 'publish')\n\n # Wait for client to receive some data\n n = 10\n assert wait_agent_attr(client, length=n)\n\n # Response received in time\n fast = 0\n client.send('sub', fast, handler=append_received, wait=0.5)\n time.sleep(0.2)\n assert server.get_attr('received') == [fast]\n assert 'x' + str(fast) in client.get_attr('received')\n\n # Response not received in time\n slow = 1\n client.send('sub', slow, handler=append_received, wait=0.1)\n assert logger_received(\n logger,\n log_name='log_history_warning',\n message='not receive req',\n timeout=0.5,\n )\n assert server.get_attr('received') == [fast, slow]\n assert 'x' + str(slow) not in client.get_attr('received')\n\n # Response not received in time with error handler\n slow = 1\n client.send(\n 'sub', slow, handler=append_received, wait=0.1, on_error=on_error\n )\n assert wait_agent_attr(client, name='error_log', length=1, timeout=0.5)\n assert server.get_attr('received') == [fast, slow]\n assert 'x' + str(slow) not in client.get_attr('received')",
"async def test_cdpsession_list_response(cdp):\n with cdp.method_subscription([\"finished\"]) as queue:\n await cdp.send(\"\", await_response=False)\n try:\n await asyncio.wait_for(queue.get(), timeout=5)\n except asyncio.TimeoutError:\n assert not cdp.listening_stopped.is_set()"
] |
[
"0.63733757",
"0.58320886",
"0.5644144",
"0.5606107",
"0.55666107",
"0.55509126",
"0.55193716",
"0.551203",
"0.5509601",
"0.5445808",
"0.5436328",
"0.5436328",
"0.5414003",
"0.5397534",
"0.5392256",
"0.5372994",
"0.53597814",
"0.5358608",
"0.52984124",
"0.5293183",
"0.5270412",
"0.52681863",
"0.52507424",
"0.5246172",
"0.52423346",
"0.52421176",
"0.52363044",
"0.5224766",
"0.52161074",
"0.5212833",
"0.5184243",
"0.5178287",
"0.517701",
"0.51693004",
"0.51660883",
"0.51659316",
"0.51586556",
"0.51515234",
"0.51241136",
"0.51070815",
"0.5102136",
"0.510014",
"0.5089787",
"0.5083334",
"0.5082244",
"0.5080889",
"0.50749606",
"0.5055881",
"0.50498414",
"0.5004454",
"0.4987991",
"0.4985429",
"0.49849927",
"0.49840334",
"0.4972824",
"0.49699655",
"0.49603403",
"0.49540484",
"0.49540484",
"0.49540484",
"0.4949499",
"0.4945337",
"0.49419314",
"0.49416968",
"0.49415398",
"0.49383494",
"0.49352318",
"0.493364",
"0.49191275",
"0.4913199",
"0.49123883",
"0.49106574",
"0.4905561",
"0.49038857",
"0.4897932",
"0.48979306",
"0.48966998",
"0.48915827",
"0.48843312",
"0.48798567",
"0.48741987",
"0.48713872",
"0.48672754",
"0.48668152",
"0.4859821",
"0.48541948",
"0.48488912",
"0.48436916",
"0.48402056",
"0.4836578",
"0.4834236",
"0.48310307",
"0.48285893",
"0.48180264",
"0.48168954",
"0.48164344",
"0.48152483",
"0.48121116",
"0.4809906",
"0.4808045"
] |
0.6577181
|
0
|
Handle a response to an RPC. Regardless of the outcome, we always move back to the SendingRpc state so that other RPCs in the queue get a chance to be handled
|
def handle_message(self, session, message):
rpc_id = get_element('id', message)
try:
if 'error' in session['rpc']['message']:
raise ClientResponseError(session['rpc']['message'], 'error')
if session['rpc']['id'] != rpc_id:
# RPC id in response does not match request, log and move on
data = str(session['rpc']['id']) + ' != ' + str(rpc_id)
raise ClientResponseError(session['rpc']['message'], 'wrong_rpc_id', data=data)
else:
# Handle response for the RPC method that was last sent to CLIENT
session['rpc']['method'].handle_response(session, message)
except (ClientMethodException, ClientResponseError) as err:
# Error parsing response, handle it and move on
LOG.debug("Error parsing client response: {!s}".format(err))
error_handler = getattr(session['rpc']['method'], "handle_error", None)
if callable(error_handler):
# Call this client Method's specific error handler, if defined
error_handler(session)
except (BrokenPipeError, Exception) as err:
if isinstance(err, BrokenPipeError):
# Silenced exception; client took too long to process and
# respond to the Northbound request
pass
else:
# Unexpected exception, log it with traceback and move on
LOG.debug('Unexpected error during client Response handling: %s', str(err))
            LOG.debug(traceback.format_exc())
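    # Regardless of success or failure, transition back to SendingRpc so the next queued RPC can be handled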
return RPCS.SendingRpc, None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_message(self, session, message):\n # Handle an RPC call\n # Reason should come from inform call.\n response = {}\n if message['method'] == 'done' and message['id'] is None:\n # Here we switch roles, becoming RPC Client\n next_state, response = RPCS.SendingRpc, None\n else:\n # We have a valid method.\n # (VALID_METHODS checked in rpcsd:parse_message)\n next_state = RPCS.ExpectRpc\n response['error'] = {'code': -31998, 'message': 'Wrong request'}\n response['id'] = message['id']\n\n return next_state, response",
"def process_incoming_response(self, response):\n # Validate the response.\n if not {\"__id\", \"__data\", \"__error\"}.issubset(iterkeys(response)):\n self.disconnect(\"Bad response received\")\n logger.warning(\"Response is missing some fields, ignoring.\")\n return\n\n # Determine the ID.\n id_ = response[\"__id\"]\n\n if id_ not in self.pending_outgoing_requests:\n logger.warning(\"No pending request with id %s found.\", id_)\n return\n\n request = self.pending_outgoing_requests.pop(id_)\n result = self.pending_outgoing_requests_results.pop(id_)\n error = response[\"__error\"]\n\n if error is not None:\n err_msg = \"%s signaled RPC for method %s was unsuccessful: %s.\" % (\n self.remote_service_coord, request[\"__method\"], error)\n logger.error(err_msg)\n result.set_exception(RPCError(error))\n else:\n result.set(response[\"__data\"])",
"def handle_response(response):\n try:\n #Cancel any active timeout for this HTTPS call.\n if timeoutCall.active():\n timeoutCall.cancel()\n def cbBody(bodystring):\n \"\"\"Process response body for JSON-RPC batch query invocation.\"\"\"\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. {node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred2 = readBody(response)\n deferred2.addCallback(cbBody)\n return deferred2\n except Exception as ex:\n self.log.failure(\"Error in handle_response {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()",
"def handle_rpc(self):\n while True: # loop handling\n self.rbuf.seek(0)\n length_prefix = self.rbuf.read(4)\n if len(length_prefix) < 4: # half-package\n break\n\n try:\n length, = struct.unpack(\"I\", length_prefix.encode(\"utf-8\"))\n except Exception as e:\n print(e.__traceback__)\n body = self.rbuf.read(length)\n if len(body) < length: # half-package\n break\n\n request = json.loads(body)\n input = request[\"in\"]\n params = request[\"params\"]\n handler = self.handlers[input]\n handler(params)\n # cut read buffer\n left = self.rbuf.getvalue()[length + 4:]\n self.rbuf = StringIO()\n self.rbuf.write(left)\n # move position to EOF\n self.rbuf.seek(0, 2)",
"def handle_response(self, response):\n with self.lock:\n req_id, status, message = response\n if req_id in self.pending_requests: # request may have timed out\n self.pending_requests[req_id].set((status, message))",
"def _queue_response(\n self,\n resp_address,\n req_id,\n status,\n message):\n\n incoming = self._incoming.get(resp_address)\n if not incoming:\n return\n\n data = incoming.packer.pack([req_id, status, message])\n try:\n resp_address.write(data)\n except pyuv.error.HandleClosedError:\n # This can happen if the client prematurely closes the\n # connection. Though the better thing to do would be to\n # detect this situation and call a call handler function\n # to take some action.\n pass",
"def handle_response(self, response):\n assert isinstance(response, CommandResponseObject)\n\n # Is there was a protocol error return the error.\n if response.is_protocol_failure():\n raise OffChainProtocolError.make(response.error)\n\n request_cid = response.cid\n\n # If we have already processed the response.\n request = self.committed_commands.try_get(request_cid)\n if request:\n # Check the reponse is the same and log warning otherwise.\n if request.response != response:\n excp = OffChainException(\n 'Got different responses with cid {request_cid}.'\n )\n excp.response1 = request.response\n excp.response2 = response\n raise excp\n # This request may have concurrent modification\n # read db to get latest status\n return self.committed_commands[request_cid].is_success()\n\n request = self.my_pending_requests.try_get(request_cid)\n if not request:\n raise OffChainException(\n f'Response for unknown cid {request_cid} received.'\n )\n\n # Read and write back response into request.\n request.response = response\n\n # Add the next command to the common sequence.\n self.committed_commands[request.cid] = request\n del self.my_pending_requests[request_cid]\n self.register_dependencies(request)\n self.apply_response(request)\n return request.is_success()",
"def _recv_msg(self, msg):\n # If this is a response, pass it along to the Remote object to be\n # processesd by the correct reply/error handler\n if is_response(msg):\n self._remote.resolve(msg)\n\n # Otherwise process the request from the remote RPC client.\n elif is_request(msg):\n method, params = msg['method'], msg['params']\n if method in self._protocol.keys():\n try:\n args, kwargs = self._reconcile_parameters(method, params)\n\n result = getattr(self, method)(*args, **kwargs)\n self._send_msg(json_rpc_result(result, None, msg['id']))\n except Exception as e:\n if isinstance(e, jsonrpc.JSONRPCError):\n raise e\n else:\n raise jsonrpc.ServerError(str(e))\n else:\n raise jsonrpc.MethodNotFound(\"Method not allowed\")\n else:\n raise jsonrpc.ParseError(\"Could not parse msg: %s\" % msg)",
"def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)",
"def handleCallResponse(self, result, node):\n if result[0]:\n self.log.info(\"got response from %s, adding to router\" % node)\n _log.debug(\"got response from %s, adding to router\" % node)\n if self.router.isNewNode(node):\n self.transferKeyValues(node)\n self.router.addContact(node)\n else:\n self.log.debug(\"no response from %s, removing from router\" % node)\n _log.debug(\"no response from %s, removing from router\" % node)\n self.router.removeContact(node)\n return result",
"def reply_handler(msg):\n print(\"Server Response: %s, %s\" % (msg.typeName, msg))\n pass",
"def handle_response(self, order):\n print config.RESP_PROMPT + \" sending results of order %s...\" % (order.uuid)\n node = order.node\n responder_type = node[config.BEACON_TYPE_IND]\n params = node[config.PARAMS_IND]\n \n ip = params.get(config.NODE_IP_KEY)\n port = params.get(config.NODE_PORT_KEY)\n \n responder_class = self.response_map.get(responder_type) # get this from the beacon map based on beacon type\n responder = responder_class() # instantiate the object\n try:\n success = responder.send_response(params, order.response)\n except Exception, e:\n print \"%s Error connecting to %s:%s (%s)\" % (config.RESP_PROMPT, ip, port, e)\n success = False\n \n return success",
"def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)",
"def reply_handler(msg):\n print \"Server Response: %s, %s\" % (msg.typeName, msg)",
"def _process_json_rpc_message(self, msg, msg_id):\n future = self._pending_requests.get(msg_id, None)\n if future:\n error = msg.get('error', None)\n result = msg.get('result', None)\n if error:\n future.set_result(error)\n else:\n future.set_result(result)\n else:\n self._logger.error(\n \"Message received without a matching pending request! '{}'\".format(msg))",
"def process_AResponse(self) :\n while (1):\n str = self.recv(self.sock)\n if (len(str) > 0):\n response = amazon_pb2.AResponses()\n response.ParseFromString(str)\n print(response)\n # handle import new stock\n for arrive in response.arrived:\n things = arrive.things\n for thing in things:\n products = Whstock.objects.filter(pid = thing.id)\n if len(products) != 0:\n products[0].count = products[0].count + thing.count\n products[0].save()\n else :\n #need to specify world id\n whstock = Whstock()\n whstock.hid = arrive.whnum\n whstock.pid = thing.id\n whstock.dsc = thing.description\n whstock.count = thing.count\n whstock.save()\n # handle pack ready response\n #when ready send AU command to let UPS truck pickup,\n #use another thread for wait for UPS response\n #when receive response send ALoad command\n #when reveived loaded for Sim send AU command and let flag = 1;\n # tell UPS packages is ready and ask for trucks (provide destinaiton address)\n # tell warehouse to load when UPS trucks ready\n for currReady in response.ready:\n #save current state\n trans = Transaction.objects.get(ship_id = currReady)\n trans.ready = True\n trans.save()\n #connect to UPS\n ups_handler = threading.Thread(target=self.process_Uresponse, args=(trans,))\n ups_handler.start()\n self.AUCommand(trans, 0)\n print(\"first msg for UPS sent(to pickup)\")\n ups_handler.join()\n\n #load info from sim\n for load in response.loaded:\n #save current state\n trans = Transaction.objects.get(ship_id = load)\n trans.loaded = True\n trans.save()\n #connect to UPS\n self.AUCommand(trans, 1)\n print(\"second msg for UPS sent(get load success from sim world)\")",
"def process_response(self, sender, response):\n\t\tif sender is None or (sender.did_quit() and not self._is_resume(response)):\n\t\t\treturn self.process_invalid_response()\n\n\t\t# Generic logic for responding to any type of message goes here\n\t\t# if self._is_quit(response):\n\t\t# \treturn self.process_quit_response(sender)\n\t\tif self._is_quit(response):\n\t\t\treturn self.process_pause_response(sender)\n\t\telif sender.did_quit() and self._is_resume(response):\n\t\t\treturn self.process_resume_response(sender)\n\n\t\tlast_sent_message = Message.objects.get_last_sent_message_requiring_response(to=sender)\n\t\tif not last_sent_message:\n\t\t\treturn self.process_no_recent_message_response(sender, response)\n\n\t\tresponse_generator = ResponseCenter.RESPONSE_MAP.get(\n\t\t\tlast_sent_message._type, self.process_unrequired_response)\n\n\t\treturn response_generator(self, sender, last_sent_message, response)",
"def HandleRemoteCall(self, sock, address):\r\n sock.setblocking(True)\r\n rpcArgs = pickle.loads(sock.recv(BUFFER_SIZE))\r\n\r\n print(f\"Received RPC for {rpcArgs[RPC_ARG_REQUEST_TYPE]} to {address}\")\r\n\r\n value = {}\r\n if(rpcArgs[RPC_ARG_REQUEST_TYPE] == FIND_SUCCESSOR_REQUEST):\r\n value = self.FindSuccessor(rpcArgs[RPC_ARG_ID])\r\n elif (rpcArgs[RPC_ARG_REQUEST_TYPE] == GET_PREDECESSOR_REQUEST):\r\n remoteNodePredecessor = self.predecessor\r\n self.predecessor = rpcArgs[RPC_ARG_NODE_INFO]\r\n value = remoteNodePredecessor\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == GET_SUCCESSOR_REQUEST:\r\n value = self.fingerTable[1].Node\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == CLOSEST_PRECEDING_FINGER_REQUEST:\r\n value = self.ClosestPrecedingFinger(rpcArgs[RPC_ARG_ID])\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == UPDATE_FINGER_TABLE_REQUEST:\r\n self.UpdateFingerTable(rpcArgs[RPC_ARG_INDEX], rpcArgs[RPC_ARG_NODE_INFO])\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == ADD_OR_UPDATE_ENTRY_REQUEST:\r\n self.AddOrUpdateEntry(rpcArgs[RPC_ARG_KEY], rpcArgs[RPC_ARG_VALUE])\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == GET_ENTRY_REQUEST:\r\n value = self.GetEntry(rpcArgs[RPC_ARG_KEY])\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == LOCAL_ADD_OR_UPDATE_ENTRY_REQUEST:\r\n self.LocalAddOrUpdateEntry(rpcArgs[RPC_ARG_KEY], rpcArgs[RPC_ARG_VALUE])\r\n elif rpcArgs[RPC_ARG_REQUEST_TYPE] == LOCAL_GET_ENTRY_REQUEST:\r\n value = self.LocalGetEntry(rpcArgs[RPC_ARG_KEY])\r\n\r\n sock.sendall(pickle.dumps(value))\r\n self.ShutDownSocket(sock)",
"def handle_execution_response(self, data, *, wait):\n ...",
"def test_handle_response_remove_request_from_pending(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = Value(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, self.target,\n 'value', time.time(), time.time() + 99999, self.version,\n PUBLIC_KEY, 'name', 'signature')\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertNotIn(uuid, lookup.pending_requests.keys())",
"def send_rpc_result(req, result):",
"def process_cmd(self, cmd):\n\n resp = self.COMMANDS[cmd.cmd](cmd)\n\n logger.debug(\"Resp: %s\" % resp)\n # send to resp_queue\n # if type == G.CTRL_TYPE:\n #\n # response = json.dumps((corr_id, routing_key, resp))\n # logger.debug(\"Sending response: %s\" % response)\n # self.out_queue.put(response)\n\n response = cmd.make_response(resp)\n logger.debug(\"Sending response: %s\" % response)\n self.out_queue.put(str(response))",
"def _callback(self, request):\r\n msgID = uuid4().hex\r\n event = Event()\r\n\r\n with self._pendingLock:\r\n self._pending[msgID] = event\r\n\r\n self._reactor.callFromThread(self.received, request._buff, msgID)\r\n\r\n # Block execution here until the event is set, i.e. a response has\r\n # arrived\r\n event.wait()\r\n\r\n with self._pendingLock:\r\n response = self._pending.pop(msgID, None)\r\n\r\n if not isinstance(response, Message):\r\n # TODO: Change exception?\r\n raise rospy.ROSInterruptException('Interrupted.')\r\n\r\n return response",
"def _handle_event(self, resp: Message):\n if resp.state_update == {}:\n _LOGGER.debug(\"No state update in message: %s\", resp.message)\n if self._avr.update_state(resp.state_update):\n self._avr.dispatcher.send(const.SIGNAL_STATE_UPDATE, resp.message)\n _LOGGER.debug(\"Event received: %s\", resp.state_update)\n if expected_response_items := self._expected_responses.popmatch(resp.group):\n _, expected_response = expected_response_items\n expected_response.set(resp)\n else:\n _LOGGER.debug(\"No expected response matched: %s\", resp.group)",
"def _on_response(self, response_type, p_api1, p_api2, double1, double2, ptr1, size1, ptr2, size2, ptr3, size3):\n if self.debug:\n print \"Response: \", ord(response_type)\n if response_type == OnConnectionStatus.value:\n self._on_connect_status(p_api2, chr(int(double1)), ptr1, size1)\n elif self._callbacks:\n for callback in self._callbacks:\n if response_type == OnRtnDepthMarketData.value:\n if self._is_market:\n callback.on_market_rtn_depth_market_data_n(p_api2, ptr1)\n elif response_type == OnRspQryInstrument.value:\n obj = cast(ptr1, POINTER(InstrumentField)).contents\n callback.on_trading_rsp_qry_instrument(p_api2, obj, bool(double1))\n elif response_type == OnRspQryTradingAccount.value:\n obj = cast(ptr1, POINTER(AccountField)).contents\n callback.on_trading_rsp_qry_trading_account(p_api2, obj, bool(double1))\n elif response_type == OnRspQryInvestorPosition.value:\n obj = cast(ptr1, POINTER(PositionField)).contents\n callback.on_trading_rsp_qry_investor_position(p_api2, obj, bool(double1))\n elif response_type == OnRspQrySettlementInfo.value:\n obj = cast(ptr1, POINTER(SettlementInfoField)).contents\n callback.on_trading_rsp_qry_settlement_info(p_api2, obj, bool(double1))\n elif response_type == OnRtnOrder.value:\n obj = cast(ptr1, POINTER(OrderField)).contents\n callback.on_trading_rtn_order(p_api2, obj)\n elif response_type == OnRtnTrade.value:\n obj = cast(ptr1, POINTER(TradeField)).contents\n callback.on_trading_rtn_trade(p_api2, obj)\n elif response_type == OnRtnQuote.value:\n obj = cast(ptr1, POINTER(QuoteField)).contents\n callback.on_trading_rtn_quote(p_api2, obj)\n elif response_type == OnRtnQuoteRequest.value:\n obj = cast(ptr1, POINTER(QuoteRequestField)).contents\n callback.on_trading_rtn_quote_request(p_api2, obj)\n elif response_type == OnRspQryHistoricalTicks.value:\n obj = cast(ptr1, POINTER(TickField)).contents\n obj2 = cast(ptr2, POINTER(HistoricalDataRequestField)).contents\n callback.on_trading_rsp_qry_historical_ticks(p_api2, obj, obj2, bool(double1))\n elif response_type == OnRspQryHistoricalBars.value:\n obj = cast(ptr1, POINTER(BarField)).contents\n obj2 = cast(ptr2, POINTER(HistoricalDataRequestField)).contents\n callback.on_trading_rsp_qry_historical_bars(p_api2, obj, obj2, bool(double1))\n elif response_type == OnRspQryInvestor.value:\n obj = cast(ptr1, POINTER(InvestorField)).contents\n callback.on_trading_rsp_qry_investor(p_api2, obj)\n elif response_type == OnFilterSubscribe.value:\n instrument = c_char_p(ptr1).value\n callback.on_trading_filter_subscribe(p_api2, ExchangeType(double1), size1, size2, size3, instrument)\n elif response_type == OnRtnError.value:\n obj = cast(ptr1, POINTER(ErrorField)).contents\n if self._is_market:\n callback.on_market_rsp_error(p_api2, obj, bool(double1))\n else:\n callback.on_trading_rsp_error(p_api2, obj, bool(double1))",
"def process(self):\n\n try:\n self._read_buffer += self._socket.recv(4096)\n except socket.error as exc:\n if exc.errno not in [errno.EAGAIN,\n errno.EWOULDBLOCK,\n errno.WSAEWOULDBLOCK]:\n raise\n response, self._read_buffer = Message.decode(self._read_buffer)\n # Check if terminating RESPONSE_VALUE with body 00 01 00 00\n if (response.type == Message.SERVERDATA_RESPONSE_VALUE and\n response.body.encode(\"ascii\") == \"\\x00\\x01\\x00\\x00\"):\n response = Message(self._response[0].id,\n self._response[0].type,\n \"\".join([r.body for r in self._response]))\n self._active_requests[response.id].response = response\n self._response = []\n self._active_requests[response.id]\n elif response.type == Message.SERVERDATA_RESPONSE_VALUE:\n self._response.append(response)\n elif response.type == Message.SERVERDATA_AUTH_RESPONSE:\n self._active_requests[self._response[0].id].response = response\n # Clear empty SERVERDATA_RESPONSE_VALUE sent before\n # SERVERDATA_AUTH_RESPONSE\n self._response = []\n self._active_requests[response.id]",
"def handle_call_response(self, result, node):\n if not result[0]:\n self.log(\"!! no response from %s, removing from router\", node)\n self.router.remove_contact(node)\n return result\n\n self.log(\"got successful response from %s\" % node)\n self.welcome_if_new(node)\n return result",
"def on_response(self, response):\n if log.isEnabledFor(logging.DEBUG):\n log.debug(\n 'RESPONSE MESSAGE RECEIVED %s %s',\n repr(self),\n response,\n )\n\n self.response_queue.put(response)",
"def handleCallResponse(self, result, node):\n if not result[0]:\n log.warning(\"no response from %s, removing from router\", node)\n self.router.removeContact(node)\n return result\n\n log.info(\"got successful response from %s\", node)\n self.welcomeIfNewNode(node)\n return result",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))",
"def process_hub_reply(self, hub_reply):\n\n # Typical response from hub is \"OK\" if there are no user or\n # automated librian requests. Almost all responses are just \"OK\"\n # therefore the default process_hub_reply is \"pass\"\n # TODO Respond to hub repies if they are other than 'OK'\n # for example, push \"send 10 frames\" request onto deque\n # and then add \"do requested extra frames\" to detectors loop\n # so that images get sent even though there is no routine reason\n pass",
"async def _response_handler(self):",
"def _handle_call(self, call):\n try:\n result = self.execute_call(call)\n if not call.want_response:\n return\n if isiter(result):\n for to_yield in result:\n self.cxn.send_message((\"yield\", to_yield))\n self.cxn.send_message((\"stop\", ))\n else:\n self.cxn.send_message((\"return\", result))\n except ConnectionError:\n raise\n except Exception, e:\n if call.want_response:\n self.cxn.send_message((\"raise\", self._serialize_exception(e)))\n raise",
"def _on_reply(self, cb, reply_tag='', answer_tag='', correlation_id=''):\n result = yield cb\n encoded_result = json.dumps(result)\n\n # We want to have some discard ability for a really fatal situations\n # however, I still have no idea how to select Exceptions\n # (handled by _wrap_handler, for example) that are really fatal\n # and corresponding request should not be marked\n # as acked. It is practically impossible to handle every exception in\n # python. That is one thing I like implemented good in go.\n # So, I am not really sure, but it appears this `discard` is pretty\n # useless in general\n if \"discard\" in result and result[\"discard\"]:\n LOGGER.debug('Discarding result to %s' % reply_tag)\n return\n\n # I really would like to see a way to make these two actions atomic\n self._channel.basic_publish(\n exchange='',\n routing_key=answer_tag,\n properties=pika.BasicProperties(correlation_id=correlation_id),\n body=encoded_result)\n self._channel.basic_ack(reply_tag)",
"def _send_and_response(self, addr, msg):\n self._namefixer(msg)\n return send_and_receive(addr, msg, 30) # manual timeout !!!!! fix it!",
"def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n # Handle a custom msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)",
"def receive_response(self, private_key, responder_id, msg_tag, response):\n return self._handle_response(private_key, responder_id, msg_tag, response)",
"def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])",
"def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply",
"async def parse_handle_response(self, json_response):\n try:\n vasp = self.vasp\n other_key = vasp.info_context.get_peer_compliance_verification_key(\n self.other_address_str\n )\n message = await other_key.verify_message(json_response)\n response = json.loads(message)\n response = CommandResponseObject.from_json_data_dict(\n response, JSONFlag.NET\n )\n\n return self.handle_response(response)\n\n except OffChainInvalidSignature as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'Signature verification failed. OffChainInvalidSignature: {e}'\n )\n raise e\n except JSONParsingError as e:\n logger.warning(\n f'(other:{self.other_address_str}) JSONParsingError: {e}'\n )\n raise e\n except OffChainException or OffChainProtocolError as e:\n logger.warning(\n f'(other:{self.other_address_str}) '\n f'OffChainException/OffChainProtocolError: {e}',\n )\n raise e",
"def response_received(self, event):\n super().response_received(event)\n\n stream_id = event.stream_id\n response_stream = self.receive_streams.get(stream_id)\n if response_stream is None:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n return\n\n headers = response_stream.headers\n\n if int(headers.get(\"grpc-status\", 0)) > 0:\n error = GrpcError.from_headers(headers)\n response_stream.close(error)\n del self.receive_streams[stream_id]",
"def process_one_result(reply):\n try:\n if \"id\" in reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. {node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])",
"def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))",
"def _response_success(self, msg, msgID):\r\n if not self._status:\r\n # Can not help it if the response takes some time and in the mean\r\n # time the interface is disabled; therefore, don't raise an error\r\n # instead just skip sending the response\r\n return\r\n\r\n self._conn.sendMessage(self._iTag, self._clsName, msg, msgID)",
"async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"executionReport\": # Order update.\n if msg[\"s\"] != self._raw_symbol:\n return\n order_no = \"{}_{}\".format(msg[\"i\"], msg[\"c\"])\n if msg[\"X\"] == \"NEW\":\n status = ORDER_STATUS_SUBMITTED\n elif msg[\"X\"] == \"PARTIALLY_FILLED\":\n status = ORDER_STATUS_PARTIAL_FILLED\n elif msg[\"X\"] == \"FILLED\":\n status = ORDER_STATUS_FILLED\n elif msg[\"X\"] == \"CANCELED\":\n status = ORDER_STATUS_CANCELED\n elif msg[\"X\"] == \"REJECTED\":\n status = ORDER_STATUS_FAILED\n elif msg[\"X\"] == \"EXPIRED\":\n status = ORDER_STATUS_FAILED\n else:\n logger.warn(\"unknown status:\", msg, caller=self)\n return\n order = self._orders.get(order_no)\n if not order:\n info = {\n \"platform\": self._platform,\n \"account\": self._account,\n \"strategy\": self._strategy,\n \"order_no\": order_no,\n \"action\": msg[\"S\"],\n \"order_type\": msg[\"o\"],\n \"symbol\": self._symbol,\n \"price\": msg[\"p\"],\n \"quantity\": msg[\"q\"],\n \"ctime\": msg[\"O\"]\n }\n order = Order(**info)\n self._orders[order_no] = order\n order.remain = float(msg[\"q\"]) - float(msg[\"z\"])\n order.status = status\n order.utime = msg[\"T\"]\n if self._order_update_callback:\n SingleTask.run(self._order_update_callback, copy.copy(order))",
"def rpc_sendback(rpc_flag):\n credential = pika.PlainCredentials('guest', 'guest')\n rpc_connection = pika.BlockingConnection(pika.ConnectionParameters(\n host='localhost', port=5672, virtual_host='/', credentials=credential))\n rpc_channel = rpc_connection.channel()\n rpc_channel.queue_declare(queue=str(rpc_flag))\n #send message to the command center using basic_publish\n if rpc_flag == \"c02\":\n rpc_channel.basic_publish(exchange='', routing_key=str(\n rpc_flag), body='Drone has reached the delivery address')\n elif rpc_flag == \"c03\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has unloaded the item')\n elif rpc_flag == \"c04\":\n rpc_channel.basic_publish(exchange='', routing_key=str(rpc_flag),\n body='Drone has reached the parking spot and available for next instruction')",
"def process_response(response):\n # Print it and exit with 1 if operation wasn't successful\n print(response['message'])\n if response['status'] != 'success':\n sys.exit(1)",
"def messageHandler(self):\n\n while len(self.ReceiveMessageBuffer) > 0: # if message handler is called all received messages will be processed\n #print 'entered message handler of ID {0}'.format(self.CommID)\n msg = self.ReceiveMessageBuffer.popleft()\n self.MsgReceiveCount += 1\n self.MsgReceiveCount_interval += 1\n type = msg.getType()\n # for communication test:\n if type == 0: #System message\n print 'ID {0} has received msg {1} from ID {2}'.format(self.CommID, msg.getData(), msg.getIDSender())\n # send reply\n data = msg.getData()\n if data == 'ping':\n retval = self.sendMessage(msg.getIDSender(), 0, 'pong')\n return retval\n elif data == 'pong':\n retval = self.sendMessage(msg.getIDSender(), 0, 'ping')\n return retval\n # elif data[0] == 'system':\n # if(data[1] == 'startRONOPT'):\n # #save fluctuation curve of cluster\n # self.EFluctuationCurve = data[4]\n # #begin with local optimization (data[2] = fromTime, data[3]=toTime)\n # self.stateRONOPT = 0\n # for n in range(len(self.Neighbors)):\n # self.NeighborMessageRec[n] = 0\n # self.RemainderOfNeighborsOpt(data[2],data[3],1)\n #########################################################################################################\n\n elif type == 20: # pseudo tree generation message\n ret = self.messageHandler_PseudoTree(msg)\n if ret == -1:\n break\n\n elif type == 40: # load propagation message\n self.messageHandler_LoadProp(msg)\n\n elif type == 70:\n self.messageHandler_RemainderMulticast(msg) #remainder multicast optimization\n\n return 0",
"def _handle( self, state, msg ):\n\t\tstate.requests[ msg.id ] = msg\n\t\tstatistics.requests.new()\n\t\tCORE.info( 'Incoming request of type %s' % msg.command )\n\t\tif not state.authenticated and msg.command != 'AUTH':\n\t\t\tres = Response( msg )\n\t\t\tres.status = BAD_REQUEST_UNAUTH\n\t\t\tself._response( res, state )\n\t\telif msg.command == 'AUTH':\n\t\t\tstate.authResponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tstate.authenticate( msg.body[ 'username' ], msg.body[ 'password' ] )\n\t\t\texcept ( TypeError, KeyError ), e:\n\t\t\t\tstate.authResponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\tstate.authResponse.message = 'insufficient authentification information'\n\t\telif msg.command == 'GET' and ( 'ucr' in msg.arguments or 'info' in msg.arguments ):\n\t\t\tresponse = Response( msg )\n\t\t\tresponse.result = {}\n\t\t\tresponse.status = SUCCESS\n\t\t\tif 'ucr' in msg.arguments:\n\t\t\t\tif not isinstance(msg.options, (list, tuple)):\n\t\t\t\t\traise InvalidOptionsError\n\t\t\t\tfor value in msg.options:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif not value:\n\t\t\t\t\t\t\t# make sure that 'value' is non-empty\n\t\t\t\t\t\t\tCORE.warn('Empty UCR variable requested. Ignoring value...')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif value.endswith('*'):\n\t\t\t\t\t\t\tvalue = value[ : -1 ]\n\t\t\t\t\t\t\tfor var in filter( lambda x: x.startswith( value ), ucr.keys() ):\n\t\t\t\t\t\t\t\tresponse.result[ var ] = ucr.get( var )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresponse.result[ value ] = ucr.get( value )\n\t\t\t\t\texcept ( TypeError, IndexError, AttributeError ), e:\n\t\t\t\t\t\tCORE.warn('Invalid UCR variable requested: %s' % (value,))\n\t\t\t\t\t\tresponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\t\t\tresponse.message = _('Invalid UCR variable requested: %s') % (value,)\n\n\t\t\telif 'info' in msg.arguments:\n\t\t\t\ttry:\n\t\t\t\t\tfd = gzip.open( '/usr/share/doc/univention-management-console-server/changelog.Debian.gz' )\n\t\t\t\t\tline = fd.readline()\n\t\t\t\t\tfd.close()\n\t\t\t\t\tmatch = MagicBucket.CHANGELOG_VERSION.match( line )\n\t\t\t\t\tif not match:\n\t\t\t\t\t\traise IOError\n\t\t\t\t\tresponse.result[ 'umc_version' ] = match.groups()[ 0 ]\n\t\t\t\t\tresponse.result[ 'ucs_version' ] = '{0}-{1} errata{2} ({3})'.format( ucr.get( 'version/version', '' ), ucr.get( 'version/patchlevel', '' ), ucr.get( 'version/erratalevel', '0' ), ucr.get( 'version/releasename', '' ) )\n\t\t\t\t\tresponse.result[ 'server' ] = '{0}.{1}'.format( ucr.get( 'hostname', '' ), ucr.get( 'domainname', '' ) )\n\t\t\t\t\tresponse.result[ 'ssl_validity_host' ] = int( ucr.get( 'ssl/validity/host', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\t\tresponse.result[ 'ssl_validity_root' ] = int( ucr.get( 'ssl/validity/root', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\texcept IOError:\n\t\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\t\t\tpass\n\n\t\t\tself._response( response, state )\n\t\telif msg.command == 'STATISTICS':\n\t\t\tresponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tpwent = pwd.getpwnam( state.username )\n\t\t\t\tif not pwent.pw_uid in ( 0, ):\n\t\t\t\t\traise KeyError\n\t\t\t\tCORE.info( 'Sending statistic data to client' )\n\t\t\t\tresponse.status = SUCCESS\n\t\t\t\tresponse.result = statistics.json()\n\t\t\texcept KeyError:\n\t\t\t\tCORE.info( 'User not allowed to retrieve statistics' )\n\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\tself._response( response, state )\n\t\telse:\n\t\t\t# inform processor\n\t\t\tif not state.processor:\n\t\t\t\tstate.processor = Processor( *state.credentials() )\n\t\t\t\tcb = 
notifier.Callback( self._response, state )\n\t\t\t\tstate.processor.signal_connect( 'response', cb )\n\t\t\tstate.processor.request( msg )",
"def finished(self, reply):\n pass",
"def process_response(self, id, result):\n raise NotImplementedError('process_response not implemented in BaseService')",
"def _handle(self, msg: Message) -> Message:\n\n # skip executor for non-DataRequest\n if msg.envelope.request_type != 'DataRequest':\n if msg.request.command == 'TERMINATE':\n raise RuntimeTerminated()\n self.logger.debug(f'skip executor: not data request')\n return msg\n\n req_id = msg.envelope.request_id\n num_expected_parts = self._get_expected_parts(msg)\n self._data_request_handler.handle(\n msg=msg,\n partial_requests=[m.request for m in self._pending_msgs[req_id]]\n if num_expected_parts > 1\n else None,\n peapod_name=self.name,\n )\n\n return msg",
"def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)",
"def handle_response(self):\r\n call_if_not_none_and_callable(self._eventHandlers.popleft(),\r\n response=self.response)",
"def handle_ipc_call(self):\n (fn_name, args, kwargs) = self._mpsing_server_conn.recv()\n try:\n res = getattr(self, fn_name).orig_fn(self, *args, **kwargs)\n # pylint: disable=broad-except\n except BaseException as exc:\n res = exc\n self._mpsing_server_conn.send(res)",
"def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.",
"def obj_received(self, obj):\n\n # TODO do something like handler registry\n\n if isinstance(obj, pb.Ping):\n self.handle_ping(obj)\n\n elif isinstance(obj, pb.Pong):\n self.handle_pong(obj)\n\n elif isinstance(obj, pb.ACS):\n if self.factory.config.failure != 'omission':\n res = self.factory.acs.handle(obj, self.remote_vk)\n self.process_acs_res(res, obj)\n\n elif isinstance(obj, pb.TxReq):\n self.factory.tc_runner.handle_tx_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.TxResp):\n self.factory.tc_runner.handle_tx_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationReq):\n self.factory.tc_runner.handle_validation_req(obj, self.remote_vk)\n\n elif isinstance(obj, pb.ValidationResp):\n self.factory.tc_runner.handle_validation_resp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.SigWithRound):\n self.factory.tc_runner.handle_sig(obj, self.remote_vk)\n\n elif isinstance(obj, pb.CpBlock):\n self.factory.tc_runner.handle_cp(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Cons):\n self.factory.tc_runner.handle_cons(obj, self.remote_vk)\n\n elif isinstance(obj, pb.AskCons):\n self.factory.tc_runner.handle_ask_cons(obj, self.remote_vk)\n\n # NOTE messages below are for testing, bracha/mo14 is normally handled by acs\n\n elif isinstance(obj, pb.Bracha):\n if self.factory.config.failure != 'omission':\n self.factory.bracha.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Mo14):\n if self.factory.config.failure != 'omission':\n self.factory.mo14.handle(obj, self.remote_vk)\n\n elif isinstance(obj, pb.Dummy):\n logging.info(\"NODE: got dummy message from {}\".format(b64encode(self.remote_vk)))\n\n else:\n raise AssertionError(\"invalid message type {}\".format(obj))\n\n self.factory.recv_message_log[obj.__class__.__name__] += obj.ByteSize()",
"def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif 
request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n 
pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n 
#Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))",
"def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. 
It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure",
"def _parse_reply(self, msg_list): #{\n logger = self.logger\n\n if len(msg_list) < 4 or msg_list[0] != b'|':\n logger.error('bad reply: %r' % msg_list)\n return None\n\n msg_type = msg_list[2]\n data = msg_list[3:]\n result = None\n srv_id = None\n\n if msg_type == b'ACK':\n srv_id = data[0]\n elif msg_type in (b'OK', b'YIELD'):\n try:\n result = self._serializer.deserialize_result(data)\n except Exception, e:\n msg_type = b'FAIL'\n result = e\n elif msg_type == b'FAIL':\n try:\n error = jsonapi.loads(msg_list[3])\n if error['ename'] == 'StopIteration':\n result = StopIteration()\n elif error['ename'] == 'GeneratorExit':\n result = GeneratorExit()\n else:\n result = RemoteRPCError(error['ename'], error['evalue'], error['traceback'])\n except Exception, e:\n logger.error('unexpected error while decoding FAIL', exc_info=True)\n result = RPCError('unexpected error while decoding FAIL: %s' % e)\n else:\n result = RPCError('bad message type: %r' % msg_type)\n\n return dict(\n type = msg_type,\n req_id = msg_list[1],\n srv_id = srv_id,\n result = result,\n )",
"def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )",
"def response_received(self, ignored):\n self._received += 1",
"def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)",
"def handle_send_operation_response_thread(self):\n\n while not self.done.isSet():\n reset_watchdog()\n\n operation_ids = {}\n while True:\n try:\n operation_response = self.outgoing_operation_response_queue.get_nowait()\n except queue.Empty:\n break\n if operation_response.device_id not in operation_ids:\n operation_ids[operation_response.device_id] = []\n # it's possible to get the same eventhub message twice, especially if we have to reconnect\n # to refresh credentials. Don't send the same service ack twice.\n if (\n operation_response.operation_id\n not in operation_ids[operation_response.device_id]\n ):\n operation_ids[operation_response.device_id].append(\n operation_response.operation_id\n )\n\n for device_id in operation_ids:\n\n device_data = self.device_list.try_get(device_id)\n\n if device_data:\n logger.info(\n \"Send operationResponse for device_id = {}: {}\".format(\n device_id, operation_ids[device_id]\n ),\n extra=custom_props(device_id, device_data.run_id),\n )\n\n message = json.dumps(\n {\n Fields.CMD: Commands.OPERATION_RESPONSE,\n Fields.SERVICE_INSTANCE_ID: service_instance_id,\n Fields.RUN_ID: device_data.run_id,\n Fields.OPERATION_IDS: operation_ids[device_id],\n }\n )\n\n self.outgoing_c2d_queue.put(\n OutgoingC2d(\n device_id=device_id,\n message=message,\n props=Const.JSON_TYPE_AND_ENCODING,\n )\n )\n\n # TODO: this should be configurable\n # Too small and this causes C2D throttling\n self.force_send_operation_response.wait(timeout=15)\n self.force_send_operation_response.clear()",
"def response(self, command_code, data):\n name, request_func, response_func = afpcommands.commands[command_code]\n return response_func(data)",
"def handle_process_pipes(self, message, response_type):\n # Check slave pipes for message\n assertTrue(self.monitor_slave.poll(3))\n if self.monitor_slave.poll():\n assertEqual(self.monitor_slave.recv(), message)\n\n assertTrue(self.consumer_slave.poll(3))\n if self.consumer_slave.poll():\n assertEqual(self.consumer_slave.recv(), message)\n\n # Send responses on slaves to heartbeat\n response = Message(name=None,\n date_time=None,\n type=response_type)\n self.monitor_slave.send(response)\n self.consumer_slave.send(response)\n\n # check server socket for heartbeat aggregate packet\n size, received = self.get_message_from_queue()\n\n self.assertIsNotNone(received)\n if received is None:\n return\n\n self.assertIsNotNone(size)\n if size is None:\n return\n\n self.assertIsInstance(received, Message)\n\n self.assertEqual(message.type, response_type)\n self.assertEqual(message.id, 0)\n self.assertEqual(message.name, 'Heartbeat')\n\n self.assertIsInstance(message.payload(0), list)\n self.assertEqual(len(message.payload(0)), 2)\n\n self.assertIsInstance(message.payload(1), set)\n self.assertEqual(len(message.payload(1)), 0)",
"def _response_switch(self, message):\n if message.relay_state == b\"D8\":\n if not self._relay_state:\n self._relay_state = True\n self.do_callback(SWITCH_RELAY[\"id\"])\n else:\n if self._relay_state:\n self._relay_state = False\n self.do_callback(SWITCH_RELAY[\"id\"])",
"def process_response(self,response):\n return self.action.process_response(response)",
"def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)",
"def get_response(self, request, decision, ext_port):\n if decision:\n return decision\n\n self.send_request(request, ext_port)\n\n return self.receive_response()",
"def test_handle_response_wrong_message_type(self):\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n contact = lookup.shortlist[0]\n msg = OK(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._blacklist = mock.MagicMock()\n lookup._handle_error = mock.MagicMock()\n lookup._handle_response(uuid, contact, response)\n lookup._blacklist.assert_called_once_with(contact)\n self.assertEqual(lookup._handle_error.call_count, 1)\n args = lookup._handle_error.call_args[0]\n self.assertEqual(args[0], uuid)\n self.assertEqual(args[1], contact)\n self.assertIsInstance(args[2], TypeError)\n self.assertEqual(args[2].args[0],\n \"Unexpected response type from {}\".format(contact))",
"def apply_response(self, request):\n assert request.response is not None\n response = request.response\n\n other_addr = self.get_other_address()\n\n self.processor.process_command(\n other_addr=other_addr,\n command=request.command,\n cid=request.cid,\n status_success=request.is_success(),\n error=response.error if response.error else None\n )",
"async def receive_result(\n self, rpc_message: RpcMessage, return_path: str, options: dict, bus_client: \"BusClient\"\n ) -> ResultMessage:\n raise NotImplementedError()",
"def process_ipc_response(ipc_response):\n if not ipc_response:\n return make_response(jsonify({\"message\": \"No response from monitoring service\"}), 503)\n\n return_code = 200 if ipc_response[\"result\"] else 500\n\n # copy values to response\n response = {}\n if \"message\" in ipc_response:\n response[\"message\"] = ipc_response[\"message\"]\n\n response |= ipc_response.get(\"value\", {})\n response |= ipc_response.get(\"other\", {})\n\n logging.info(\"Code: %s\", return_code)\n logging.info(\"Response: %s\", response)\n\n return make_response(jsonify(response), return_code)",
"def _handle_result(self, result):\n if self.result_callback != None:\n #Call the result callback but expect failure.\n try:\n self.result_callback(result, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in result handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.logg.error(\"Error: no on_result defined for '{cmd!r}' command result: {res!r}.\",cmd=self.command,res=result)",
"def get_async_response(self,message): \n index = self.async_query_buffer.index(message)\n #print('**********')\n #print ('requesting ' + message + ' at index ' + str(index))\n b = True\n try:\n response = self.async_reply_buffer[index]\n if response.endswith('\\n'):\n response = self.async_reply_buffer.pop(index)\n else:\n b = False\n response = 'EMPTY'\n except IndexError: \n #print('response not available yet!!')\n response = 'EMPTY'\n b = False\n if b: \n #print('got reply:')\n #print(response)\n query = self.async_query_buffer.pop(index)\n #print('for query:')\n #print(query)\n #print('Buffers:')\n #print(self.async_reply_buffer)\n #print(self.async_query_buffer)\n #print('_________________')\n\n return response",
"def check_response(func):\n\n def _wrapper(*args, **kw):\n with _mutex:\n _message[:] = [] # why Python list has no clear() ??\n\n result, status = func(*args, **kw)\n if not status.success:\n err_msg = \"Error running command [%s], reason: %s\" % (func.__name__, status.reason)\n raise error.BigflowRPCException(err_msg)\n\n return result\n return _wrapper",
"def run(self, session):\n rpc = None\n if session['client']['event'] == 'CONNECTION_REQUEST':\n self.add_nb_queue_to_session_queue(session)\n\n while rpc is None and session['queue']:\n try:\n # Loop through queue until there is an RPC to send, or until\n # there are no more RPCs queued, or until an error occurs\n session['rpc']['method'] = session['queue'].pop(0)\n rpc = session['rpc']['method'].send_request(session)\n except ClientMethodException:\n # Failed to send this RPC, move on to the next\n LOG.debug(\"Error during preparation of client method: %s\" % str(session['rpc']['method']))\n continue\n except Exception:\n traceback.print_exc()\n LOG.debug(\"Unexpected error during preparation of client method: %s\" % str(session['rpc']['method']))\n return RPCS.SendingRpc, None\n\n if rpc is not None:\n # RPC ready: Send it and ExpectResponse\n return RPCS.ExpectResponse, rpc\n else:\n # If there are no (more) RPCs to send, log ok\n # and send done, indicating communication is complete\n session['log'] = {'rc': 'ok', 'msg': ''}\n session['db'].clear_dirtyflag(session['client']['cid'])\n return RPCS.Listening, {'method': 'done'}",
"def _handle_one_message(self):\n\n type, data = self.cxn.recv_message()\n\n if type.startswith(\"call\"):\n if len(data) != 3:\n message = (type, data)\n raise MessageError.invalid(message, \"incorrect number of args\")\n flags = {\n \"want_response\": type == \"call\",\n }\n call = Call(data[0], data[1], data[2], flags, self.client)\n self._handle_call(call)\n return False\n\n raise MessageError.bad_type(type)",
"async def handle_request():\n nonlocal process, process_task\n logger.debug(\"Waiting for request\")\n request = await queue.get()\n\n if request.name == RequestTypes.run_process:\n assert process is None, \"Process must not have been started\"\n process_state = request.contents\n process = self._start_callback(process_state)\n process_task = asyncio.create_task(process.wait())\n pid = process.pid\n logger.debug(\"Running process in handler: %d\", pid)\n await connection.send(Response(pid))\n\n elif request.name == RequestTypes.wait_process_done:\n assert process is not None, \"Process must have been started\"\n logger.debug(\"Waiting for process to exit\")\n # We don't want the process.wait() task to be cancelled in case\n # our connection gets broken.\n exitcode = await asyncio.shield(process_task)\n logger.debug(\"Result: %d\", exitcode)\n await connection.send(Response(exitcode))\n\n return True",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))",
"def get_rpc_response(self, msgobj, dataobj):\n funcname = msgobj['func']\n args = msgobj.get('args', [])\n kwargs = msgobj.get('kwargs', {})\n auth = False\n if self.authorized_functions is not None \\\n and funcname not in self.authorized_functions:\n resp = self.ph.pack_rpc(self.ph.error_obj('unauthorized access'))\n return resp, []\n\n if hasattr(self, 'can_' + funcname):\n auth = self.can_funcname(*args, **kwargs)\n if not auth:\n resp = self.ph.pack_rpc(self.ph.error_obj('unauthorized access'))\n return resp, []\n func = getattr(self, funcname)\n if len(dataobj) > 0:\n kwargs['data'] = dataobj\n responseobj, dataobj = func(*args, **kwargs)\n return self.ph.pack_rpc(responseobj), dataobj",
"async def data_received(self, data: bytes) -> None:\n\n self.response_message.set_result(data)",
"async def _receive_updated_response(self, data):\n serialized_text_responses = await serialize_text_algo_api_response(data)\n await self.send_serialized_data(serialized_text_responses)",
"async def call_rpc(self, rpc_message: RpcMessage, options: dict, bus_client: \"BusClient\"):\n raise NotImplementedError()",
"def _HandleResponse(unused_request_id, unused_response, exception):\n if exception is not None:\n raise exception",
"def usingHandler(self, cmd):\n self.command_handler.handle_command(cmd)\n while msg_queue.empty() is False:\n self.writeresponse(msg_queue.get())",
"def cbBody(bodystring):\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. {node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()",
"def _process_pool_status_response(self, buf, length):\n\t\tself.pcpResInfo.pcp_add_json_result('command_status', 'success')\n\t\tvalue, index = self._getNextString(buf, 0)\n\t\tif value == 'ArraySize':\n\t\t\tindex += 1\n\t\t\tci_size = buf[index:]\n\t\t\tci_size = self.bytes_to_int(ci_size)\n\n\t\t\tself._setResultStatus(ResultStateType.INCOMPLETE)\n\t\t\tself.pcpResInfo.pcp_add_json_result('config', list())\n\t\telif value == 'ProcessConfig':\n\t\t\tindex += 1\n\t\t\tif self.PCPResultStatus(self.pcpResInfo) != ResultStateType.INCOMPLETE:\n\t\t\t\tself.pcp_internal_error('command failed. invalid response')\n\t\t\t\tself.pcpResInfo.pcp_add_json_result('command_status', 'failed')\n\t\t\t\tself._setResultStatus(ResultStateType.BAD_RESPONSE)\n\n\t\t\tstatus = POOL_REPORT_CONFIG()\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.name = value\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.value = value\n\n\t\t\tvalue, index = self._getNextString(buf, index)\n\t\t\tif value:\n\t\t\t\tindex += 1\n\t\t\t\tstatus.desc = value\n\n\t\t\tself.pcpResInfo.pcp_append_json_result('config', status.get_json())\n\t\t\tself._setResultData(self.pcpResInfo, status)\n\t\telif value == 'CommandComplete':\n\t\t\tself._setResultStatus(ResultStateType.COMMAND_OK)",
"def on_response(context, response_type, response):\n if response.status in (OrderStatus.SUCCEED.value, OrderStatus.PARTED.value) and response_type == 0:\n if response.exe_volume == 0:\n return\n if response.direction == Direction.BUY.value:\n if response.open_close == OpenClose.OPEN.value:\n context.pos_dict[response.symbol]['long_volume'] += response.exe_volume\n elif response.open_close in (OpenClose.CLOSE.value, OpenClose.CLOSE_YES.value):\n context.pos_dict[response.symbol]['short_volume'] -= response.exe_volume\n elif response.direction == Direction.SELL.value:\n if response.open_close == OpenClose.OPEN.value:\n context.pos_dict[response.symbol]['short_volume'] += response.exe_volume\n elif response.open_close in (OpenClose.CLOSE.value, OpenClose.CLOSE_YES.value):\n context.pos_dict[response.symbol]['long_volume'] -= response.exe_volume",
"def _process(connection, process):\n try:\n command = connection.recv()\n except IOError as e:\n return \"Connection receive error: %s\" %(str(e))\n\n if command == __quit_command:\n try:\n connection.send(\"Exited server.\")\n finally:\n connection.close()\n return __quit_command\n\n #print \"Processing command\", command\n data = process(command)\n\n try:\n connection.send(data)\n except IOError as e:\n return \"Connection send error: %s\" %(str(e))\n\n connection.close()",
"def response(self, context, message):\r\n return True",
"def handle_comm_msg(self, message):\n msg = self._unwrap(message)\n\n try:\n self.geonotebook._recv_msg(msg)\n\n except jsonrpc.JSONRPCError as e:\n self.geonotebook._send_msg(\n json_rpc_result(None, e.tojson(), msg['id'])\n )\n self.log.error(u\"JSONRPCError (%s): %s\" % (e.code, e.message))\n\n except Exception as e:\n self.log.error(u\"Error processing msg: {}\".format(str(e)))",
"def put_response(self, msg: Any) -> None:\n # redis.Connection.__del__ might call self.close at any time, which\n # will set self.responses to None. We assume this will happen\n # atomically, and the code below then protects us against this.\n responses = self.responses\n if responses:\n responses.put(msg)",
"def _on_response(self):\n request = self._requests.pop(0)\n try:\n request[-1].cancel()\n left = request[-1].end - Engine.instance().time\n except Exception:\n left = request[5]\n pass\n\n response = self.current_response\n\n close_after = response.headers.get('Connection', '') == 'close'\n close_after &= self.keep_alive\n\n # Is this a 100 Continue?\n if response.status == 100:\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Did we catch a redirect?\n if response.status in (301,302) and request[9] <= self.max_redirects:\n # Generate a new request, using the new URL.\n new_url = urlparse.urljoin(response.full_url,\n response.headers['Location'])\n\n new_headers = request[3].copy()\n del new_headers['Host']\n\n new_req = self._add_request(request[0], new_url, new_headers,\n request[4], left, False)\n new_req[6] = request[6]\n new_req[7] = request[7]\n new_req[9] = request[9] + 1\n\n new_req.append(\n Engine.instance().defer(left, self._request_timeout, new_req))\n\n self._requests.insert(0, new_req)\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Try converting to unicode?\n if self.unicode:\n content_type = response.headers.get('Content-Type','')\n if 'charset=' in content_type:\n content_type, _, encoding = content_type.partition('charset=')\n try:\n response.body = response.body.decode(encoding)\n except (LookupError, UnicodeDecodeError):\n pass\n\n # Determine the handler function to use.\n if callable(request[6]):\n func = request[6]\n else:\n func = self.on_response\n\n # Call the handler function.\n try:\n func(0, response)\n except Exception:\n log.exception('Error in HTTP response handler.')\n\n # Process the next request.\n self.current_response = None\n\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()",
"def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg",
"def async_handle_message(self, msg: dict) -> None:\n if msg[\"type\"] == \"result\":\n future = self._result_futures.get(msg[\"messageId\"])\n\n if future is None:\n self._logger.warning(\n \"Received result for unknown message: %s\", msg[\"messageId\"]\n )\n return\n\n if msg[\"success\"]:\n future.set_result(msg[\"result\"])\n return\n\n future.set_exception(FailedCommand(msg[\"messageId\"], msg[\"errorCode\"]))\n return\n\n if self.driver is None:\n raise InvalidState(\"Did not receive state as first message\")\n\n if msg[\"type\"] != \"event\":\n # Can't handle\n return\n\n event = Event(type=msg[\"event\"][\"event\"], data=msg[\"event\"])\n self.driver.receive_event(event)",
"async def process(self, msg):\n logger.debug(\"msg:\", json.dumps(msg), caller=self)\n e = msg.get(\"e\")\n if e == \"ORDER_TRADE_UPDATE\": # Order update.\n self._update_order(msg[\"o\"])",
"def process_response(self, request, response):\n return response",
"def process_response(self, request, response):\n return response"
] |
[
"0.7039859",
"0.7026062",
"0.690414",
"0.6804377",
"0.6649933",
"0.6590539",
"0.6588729",
"0.6510268",
"0.65001494",
"0.63618594",
"0.63517755",
"0.6349003",
"0.63440263",
"0.63440263",
"0.6343718",
"0.6316436",
"0.6311576",
"0.6277061",
"0.6267772",
"0.6266023",
"0.6261978",
"0.622583",
"0.621413",
"0.62121546",
"0.6184484",
"0.6178546",
"0.61781174",
"0.6161475",
"0.61610144",
"0.6140555",
"0.6128956",
"0.60684526",
"0.6063099",
"0.6060829",
"0.6060405",
"0.60566044",
"0.604562",
"0.6037678",
"0.6028818",
"0.60053045",
"0.60020846",
"0.5990548",
"0.59730554",
"0.5954419",
"0.5947115",
"0.5945169",
"0.5926074",
"0.59155935",
"0.59012514",
"0.5900163",
"0.5894921",
"0.5890964",
"0.5868744",
"0.58634865",
"0.58581233",
"0.5842204",
"0.58286846",
"0.5828516",
"0.58279073",
"0.58076364",
"0.58021784",
"0.5789152",
"0.57743174",
"0.57725245",
"0.57659584",
"0.57346",
"0.57305276",
"0.57291037",
"0.5721121",
"0.5720857",
"0.5718701",
"0.5714805",
"0.5702547",
"0.5702471",
"0.5701003",
"0.56974417",
"0.5693805",
"0.56826127",
"0.5679831",
"0.56783104",
"0.5677112",
"0.56721175",
"0.5663594",
"0.56598604",
"0.5658446",
"0.5653001",
"0.5646098",
"0.56374264",
"0.5630016",
"0.56260514",
"0.5624486",
"0.56199205",
"0.5610033",
"0.5600004",
"0.5598723",
"0.55959314",
"0.55928236",
"0.55855554",
"0.55846643",
"0.55846643"
] |
0.67964923
|
4
|
Handle an input from a device
|
def handle_message(self, session, message):
    # Handle an RPC call
    # Reason should come from inform call.
    response = {}
    if message['method'] == 'done' and message['id'] is None:
        # Here we switch roles, becoming RPC Client
        next_state, response = RPCS.SendingRpc, None
    else:
        # We have a valid method.
        # (VALID_METHODS checked in rpcsd:parse_message)
        next_state = RPCS.ExpectRpc
        response['error'] = {'code': -31998, 'message': 'Wrong request'}
        response['id'] = message['id']
    return next_state, response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_input(self, event):\n pass",
"def listen(device_input, callback):\n while True:\n time.sleep(0.01)\n event = readControlDataRaw(device_input)\n (control_id, control_type, event_type, value) = parseControlEvent(event)\n if control_id != -1:\n callback(control_id, control_type, event_type, value)",
"def readInput():\n for e in pygame.event.get():\n try:\n id = e.joy\n print id\n dev = InputDevice.devs[id]\n if e.type == JOYBUTTONDOWN:\n if e.button == dev._accBtn:\n dev._speed = 1\n elif e.button == dev._revBtn:\n dev._speed = -1\n elif e.button == dev._powBtn:\n dev._boost = 2\n elif e.type == JOYBUTTONUP:\n if e.button == dev._accBtn:\n dev._speed = 0\n elif e.button == dev._revBtn:\n dev._speed = 0\n elif e.button == dev._powBtn:\n dev._boost = 1\n elif e.type == JOYAXISMOTION:\n if e.axis == dev._steeringAxis:\n dev._dir = dev._js.get_axis(dev._steeringAxis)\n except Exception:\n None",
"def _handleInput(self, paramInput):\n pass",
"def install_handle_input(self):\n pass",
"def handle_keyboard_data(data):\n pass",
"async def async_process_input(self, inp: inputs.Input) -> None:\n if isinstance(inp, inputs.ModSn):\n self.hardware_serial = inp.hardware_serial\n self.manu = inp.manu\n self.software_serial = inp.software_serial\n self.hardware_type = inp.hardware_type\n\n self.serial_known.set()\n await self.cancel()",
"def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_key_code(event)\n\n if code in self.codes:\n new_code = self.codes[code]\n else:\n new_code = 0\n event_type = self._get_event_type(event)\n value = self._get_key_value(event, event_type)\n scan_event, key_event = self.emulate_press(\n new_code, code, value, self.timeval)\n\n self.events.append(scan_event)\n self.events.append(key_event)\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n # We are done\n self.write_to_pipe(self.events)",
"def handle_input(self, token):\n self.pipeline.handle_input(token)",
"def inp():\n return joystick",
"def handle_input(self):\n\n\t\tline = sys.stdin.readline().strip()\n\n\t\tif line == '':\n\t\t\t# print('')\n\t\t\tself.print_prompt()\n\t\t\treturn\n\n\t\tcommand_name, *parts = line.split()\n\n\t\tif command_name in self.commands:\n\t\t\t# Call given command and unpack parts into args\n\t\t\tself.commands[command_name]['callback'](*parts)\n\t\telse:\n\t\t\tprint(command_name + ' : command not found')\n\t\t\tself.print_available_commands()\n\n\n\t\tself.print_prompt()",
"def handle_input(self, event):\n self.update_timeval()\n self.events = []\n code = self._get_event_type(event)\n\n # Deal with buttons\n self.handle_button(event, code)\n\n # Mouse wheel\n if code == 22:\n self.handle_scrollwheel(event)\n # Other relative mouse movements\n else:\n self.handle_relative(event)\n\n # Add in the absolute position of the mouse cursor\n self.handle_absolute(event)\n\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(self.events)",
"def input(self, description):\n if isinstance(description, (int, long)):\n self._input = description\n self._sendCommand('%02dFN' % description)\n elif description in self.inputs:\n self._input = self.inputs[description]\n self._sendCommand('%02dFN' % self.inputs[description])\n else:\n raise Exception('No such input: %s' % description)",
"def do_Device (self, line):",
"def set_input(self, input):\n self.input = transfer_to_device(input, self.device)",
"def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")",
"def handle_input(data: dict):",
"def user_input_listener(state: SharedState):",
"def handle_input(self, key):\n if key == 'Q' or key == 'q':\n if(self.proc is not None):\n self.proc.send_signal(signal.SIGINT)\n\n raise urwid.ExitMainLoop()\n if key == 'R' or key == 'r':\n self.model.running = True\n self.run()\n if key == 'P' or key == 'p':\n self.togglePause()",
"def act_on_input(self, input):\n if not input:\n return\n self.parse_input(input)\n commands = self.extract_commands()\n self.execute_commands(commands)",
"def input(self):\r\n pass",
"def input(self, description):\n if description in self.inputs:\n self.send(self.inputs[description])\n self._input = description\n else:\n raise Exception('No such input: %s' % description)",
"def do(self, argin):\n # overwrites the do hook\n\n device = self.target\n\n scan = json.loads(argin)\n\n device._scan_ID = int(scan[\"scan_id\"])\n\n data = tango.DeviceData()\n data.insert(tango.DevString, str(device._scan_ID))\n device._group_vcc.command_inout(\"Scan\", data)\n\n device._group_fsp_corr_subarray.command_inout(\"Scan\", data)\n device._group_fsp_pss_subarray.command_inout(\"Scan\", data)\n device._group_fsp_pst_subarray.command_inout(\"Scan\", data)\n\n # return message\n message = \"Scan command successful\"\n self.logger.info(message)\n return (ResultCode.STARTED, message)",
"def assignInputDevice(*args, clutch: AnyStr=\"\", continuous: bool=True, device: AnyStr=\"\",\n immediate: bool=True, multiple: bool=True, q=True, query=True,\n **kwargs)->Union[AnyStr, Any]:\n pass",
"def processInputs(self):",
"def handleInput(self, paramInput):\n MCMC.handleInput(self, paramInput)",
"def do_on_input_update(self, msg_id, payload, player):\n pass",
"def do_input(self, line):\n cmd_args = io.parse_cmd_args(line, io.input_cmd_pattern)\n if cmd_args:\n success = self.manager.input(\n cmd_args.get('target'), \n cmd_args.get('cslist'), \n mode=cmd_args.get('mode')\n )\n if success:\n self.console_print(\"Yippee! input successfull!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)",
"def __init__(self, dev_path, caps_mapping, pulse_handler):\n self.caps_mapping = caps_mapping\n self.pulse_handler = pulse_handler\n\n self.device = evdev.InputDevice(dev_path)\n\n if self._is_capable_of(caps_mapping):\n # Every 0.1 seconds is good enough\n self.handler_timeout = GLib.timeout_add(100, self._handle_events)\n else:\n self.device.close()",
"def act(self, device):\n with open(device, 'r') as fd:\n result = fcntl.ioctl(fd, self.ioctl)\n if result:\n raise Exception(\"ioctl failed with result {0}\".format(result))",
"def handle_input(self, proxy, event_type, event, refcon):\n self.update_timeval()\n self.events = []\n\n if event_type in (1, 2, 3, 4, 25, 26, 27):\n self.handle_button(event, event_type)\n\n if event_type == 22:\n self.handle_scrollwheel(event)\n\n # Add in the absolute position of the mouse cursor\n self.handle_absolute(event)\n\n # Add in the relative position of the mouse cursor\n self.handle_relative(event)\n\n # End with a sync marker\n self.events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(self.events)",
"def inb():",
"def handle_input():\n\n # wait for user input and get timeout or character to process\n char = read_input()\n\n # handle user input\n if not is_input_valid(char):\n # No valid input, keep waiting for input\n return True\n\n # if terminal size is not valid, stop here\n if not nuqql.config.WinConfig.is_terminal_valid():\n show_terminal_warning()\n return True\n\n # if terminal resized, resize and redraw active windows\n if char == curses.KEY_RESIZE:\n nuqql.conversation.resize_main_window()\n return True\n\n # pass user input to active conversation\n for conv in nuqql.conversation.CONVERSATIONS:\n if conv.is_active():\n conv.process_input(char)\n return True\n\n # if no conversation is active pass input to active list window\n if nuqql.win.MAIN_WINS[\"list\"].state.active:\n # list window navigation\n nuqql.win.MAIN_WINS[\"input\"].redraw()\n nuqql.win.MAIN_WINS[\"log\"].redraw()\n nuqql.win.MAIN_WINS[\"list\"].process_input(char)\n return True\n\n # list window is also inactive -> user quit\n return False",
"def uninstall_handle_input(self):\n pass",
"async def async_process_input(self, inp: inputs.Input) -> None:\n raise NotImplementedError()",
"def device_event(observer, device):\n if (device.action == \"add\"):\n print(\"conectado\")\n name = device.sys_name\n print(name)\n print(name[len(name) - 4])\n if(name[len(name) - 4] == \":\"):\n print(\"device mala\")\n else:\n time.sleep(5)\n try:\n with open(\"/media/usb0/LABSD.txt\", \"r\") as f:\n data = f.readlines()\n except IOError:\n print('cannot open')\n else:\n dataprocess(data)\n f.close()\n elif (device.action == \"remove\"):\n print(\"desconectado\")\n else:\n print(\"error\")",
"def process_input(self):\n for event in pygame.event.get():\n\n if self.joystick and self.state == self.STATE_PLAY:\n\n if event.type == pygame.JOYAXISMOTION:\n self.gameevents.add(\"joyaxismotion\", event.axis, event.value, type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONDOWN:\n if event.button == self.fire_button:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.type == pygame.JOYBUTTONUP:\n if event.button == self.fire_button:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.button == self.IFF_button:\n self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.button == self.shots_button:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.button == self.pnts_button:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')\n\n else:\n\n if event.type == pygame.KEYDOWN:\n\n if (pygame.key.get_mods() & self.modifier):\n if event.key == pygame.K_q:\n self.gameevents.add(\"press\", \"quit\", type='EVENT_USER')\n\n if event.key == pygame.K_RETURN:\n\n if self.state == self.STATE_INTRO:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_SETUP:\n self.state = self.STATE_GAMENO\n\n elif self.state == self.STATE_GAMENO:\n if self.mine_exists:\n self.state = self.STATE_SETUP_IFF\n else:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_IFF:\n self.state = self.STATE_PREPARE\n\n elif self.state == self.STATE_SCORES:\n self.state = self.STATE_SETUP\n\n elif self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"press\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"press\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"press\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"press\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n self.gameevents.add(\"press\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"press\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"press\", \"pnts\", type='EVENT_USER')\n elif event.key == self.pause_key and self.config['General']['allow_pause']:\n self.gameevents.add(\"press\", \"pause\", type='EVENT_USER')\n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n \n elif self.state == self.STATE_PAUSED and event.key == self.pause_key:\n self.gameevents.add(\"press\", \"unpause\", type='EVENT_USER')\n \n else:\n self.gameevents.add(\"press\", event.key, \"user\", type='EVENT_SYSTEM')\n\n elif event.type == pygame.KEYUP:\n\n if self.state == self.STATE_PLAY:\n\n if event.key == self.thrust_key:\n self.gameevents.add(\"release\", \"thrust\", type='EVENT_USER')\n elif event.key == self.left_turn_key:\n self.gameevents.add(\"release\", \"left\", type='EVENT_USER')\n elif event.key == self.right_turn_key:\n self.gameevents.add(\"release\", \"right\", type='EVENT_USER')\n elif event.key == self.fire_key:\n self.gameevents.add(\"release\", \"fire\", type='EVENT_USER')\n elif event.key == self.IFF_key:\n 
self.gameevents.add(\"release\", \"iff\", type='EVENT_USER')\n elif event.key == self.shots_key:\n self.gameevents.add(\"release\", \"shots\", type='EVENT_USER')\n elif event.key == self.pnts_key:\n self.gameevents.add(\"release\", \"pnts\", type='EVENT_USER')",
"def ask_for_device():\n return input(\"Device name or ip address : \")",
"def __process_input(self):\n\n while not self.stop_event.isSet():\n\n readable, writable, exceptional = select.select([self.event_queue], [], [])\n\n if readable[0] is self.event_queue:\n\n event = self.event_queue.get()\n \n if (time.time() - event.creation) > INSTRUCTION_TIMEOUT:\n self.logging_queue.put(self.__create_event_obj(ERROR, 'TimeOut', str(time.time() - event.creation)))\n self.logger1.info(\"Instruction rejected due to timeout: '{}', '{}', '{}'\".format(event.source, event.type, event.value))\n \n elif not self.__input_filter(event):\n self.logging_queue.put(self.__create_event_obj(ERROR, 'Filtered', '{}, {}, {}'.format(event.source, event.type, event.value)))\n \n else:\n \n self.logging_queue.put(event) \n \n if event.type == self.input_commands.toggle_door_cmd:\n self.__toggle_door()\n self.__update_door_info()\n elif event.type == self.input_commands.light_cmd:\n self.__light()\n elif event.type == self.input_commands.open_door_cmd:\n self.__open_door()\n self.__update_door_info()\n elif event.type == self.input_commands.close_door_cmd:\n self.__close_door()\n self.__update_door_info()\n elif event.type == self.input_commands.control_wire:\n self.__log_output_pin_state(event)\n self.__update_door_info()\n elif event.type == self.input_commands.stop_cmd:\n self.__del__()\n return None\n \n \n #if event.hardware:\n # self.__log_input_pin_state(event) ",
"def listen(self):\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n axis=self.axis_data\n\n if 0 in axis:\n self.x=axis[0]\n self.y=-axis[1]\n\n # Turbo\n if self.button_data[7]:\n self.x*=2\n self.y*=2\n # Start Camera\n if self.button_data[3]:\n subprocess.Popen([\"firefox\",otraip+\"/html\"],\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return \"camera\"\n\n # Measure\n if self.button_data[1]:\n return \"measure\"\n\n # Exit\n if self.button_data[2]:\n return \"exit\"\n return \"move \"+str(self.x)+\" \"+str(self.y)+\"\\n\"",
"def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)",
"def _on_stdin_read(self, data):\n if not self.opts[\"udp\"]:\n self.fire(write(data))\n else:\n self.fire(write((self.host, self.port), data))",
"def _handleInput(self):\n\n Game.Player.running(Game.ControlState[Game.MoveRight], not (Game.ControlState[Game.MoveRight] == Game.ControlState[Game.MoveLeft]))\n Game.Player.jumping(Game.ControlState[Game.Jump])\n Game.Player.flying(Game.ControlState[Game.Fly])\n Game.Player.firing(Game.ControlState[Game.Fire])",
"def process_inputs(self, inputs):",
"def d_input(self):\n pass",
"def handle_input(self, ncode, wparam, lparam):\n value = WIN_KEYBOARD_CODES[wparam]\n scan_code = lparam.contents.scan_code\n vk_code = lparam.contents.vk_code\n self.update_timeval()\n\n events = []\n # Add key event\n scan_key, key_event = self.emulate_press(\n vk_code, scan_code, value, self.timeval)\n events.append(scan_key)\n events.append(key_event)\n\n # End with a sync marker\n events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(events)\n\n return ctypes.windll.user32.CallNextHookEx(\n self.hooked, ncode, wparam, lparam)",
"def on_user_input(self, dut_address, reply_boolean, expected_ui_event):\n pass",
"def driver_input(self, command: bool) -> None:\n self.input_command = command",
"def handle_sensor_data(data):\n \n #print dir( data )\n D.data = data\n\n #Check for a bump\n if data.bumpRight or data.bumpLeft:\n print \"Bumped!\"\n\n\n #Check if play button was pressed\t\n if data.play:\n\tprint \"Play button pressed!\"\n\tStateMachine.state_stop()\n\trospy.signal_shutdown(\"play button pressed\")",
"def read_inputs(self):\n self.in_power.read()\n self.in_alert.read()",
"def input(self, p_addr = 0):\n\t\tif len(self.input_data):\n\t\t\tin_pos = self.get_address(p_addr, 1)\n\t\t\tin_val = self.input_data.pop(0)\n\t\t\tself.set_data(in_pos, in_val)\n\t\t\tself.pos += 2\n\t\telse:\n\t\t\tif self.disp_pause:\n\t\t\t\tprint(\"PAUSED\")\n\t\t\tself.pause = True",
"def _process_input(self, fd):\n if fd.fileno() == self._proxyfd.fileno():\n pkt = self._grab_packet(\n lambda data, s=self: s.create_packet(packet=data), fd)\n self._handle_proxy_packet(pkt)\n else:\n Server._process_input(self, fd)",
"def handle_input(self, ncode, wparam, lparam):\n x_pos = lparam.contents.x_pos\n y_pos = lparam.contents.y_pos\n data = lparam.contents.mousedata\n\n # This is how we can distinguish mouse 1 from mouse 2\n # extrainfo = lparam.contents.extrainfo\n # The way windows seems to do it is there is primary mouse\n # and all other mouses report as mouse 2\n\n # Also useful later will be to support the flags field\n # flags = lparam.contents.flags\n # This shows if the event was from a real device or whether it\n # was injected somehow via software\n\n self.emulate_mouse(wparam, x_pos, y_pos, data)\n\n # Give back control to Windows to wait for and process the\n # next event\n return ctypes.windll.user32.CallNextHookEx(\n self.hooked, ncode, wparam, lparam)",
"def handle_inputs(self):\n user_input = \"\"\n while user_input != \"exit\":\n self.print_divider()\n user_input = input()\n self.do_action_for_input(user_input)",
"def input_reader_worker():\n while True:\n global last_read_player1_input\n global last_read_player2_input\n\n # First read movement inputs from adc\n # First write byte to read from Vin3 - player1 input channel\n try:\n bus.write_byte(I2CADDR, 0x80)\n last_read_player1_input[\"movement\"] = read_from_adc()\n except IOError:\n logging.warning(\"hardware_input: IOError when writing to bus. Setting last_read_player1_input to a default value instead.\")\n last_read_player1_input[\"movement\"] = config.adc_max_val / 2\n\n # Now write to read from Vin4 - player2 input channel\n try:\n bus.write_byte(I2CADDR, 0x40)\n last_read_player2_input[\"movement\"] = read_from_adc()\n except IOError:\n logging.warning(\"hardware_input: IOError when writing to bus. Setting last_read_player2_input to a default value instead.\")\n last_read_player2_input[\"movement\"] = config.adc_max_val / 2\n\n # Then read switch inputs from GPIO ports\n try:\n last_read_player1_input[\"stretch\"] = GPIO.input(config.gpio_pin_p1_stretch)\n last_read_player1_input[\"serve\"] = GPIO.input(config.gpio_pin_p1_serve)\n except IOError:\n logging.warning(\"hardware_input: Unable to read player1 switch input\")\n\n try:\n last_read_player2_input[\"stretch\"] = GPIO.input(config.gpio_pin_p2_stretch)\n last_read_player2_input[\"serve\"] = GPIO.input(config.gpio_pin_p2_serve)\n except IOError:\n logging.warning(\"hardware_input: Unable to read player2 switch input\")\n\n time.sleep(1 / float(config.adc_updates_per_sec))",
"def _input_latency_handler(self, irq_name):\n self._input_latencies = IntervalList()\n all_tasks = self._trace.cpu.task_intervals()\n all_aq_events = self.input_events()\n touch_irqs = IntervalList(filter_by_task(\n all_tasks, 'name', irq_name, 'any'))\n def _input_intervals():\n \"\"\"\n Generator that yields intervals when discrete input event(s)\n are read & decoded by Android `Input Reader`.\n\n x__x__x____IR___ID_ID_ID___DI_SU__DI_SU__DI_SU______\n\n x = multiple input IRQs (multi-touch translated by Android Input Framework)\n IR = Input Reader [read/decodes multiple events @ once]\n ID = Input Dispatch [dispatches each input event]\n DI = Deliver Input [ appropriate window consumes input event ]\n SU = SurfaceFlinger Screen Update due to window handling input event\n\n Please note InputReader 'iq' will be set to 1 whenever InputReader\n had event to process. This could be disabled in some systems.\n \"\"\"\n last_timestamp = self._trace.interval.start\n for ir_event in filter_by_task(all_tasks, 'name', 'InputReader', 'any'):\n if last_timestamp <= ir_event.interval.end:\n yield Interval(last_timestamp, ir_event.interval.end)\n last_timestamp = ir_event.interval.end\n\n for interval in _input_intervals():\n irqs = touch_irqs.slice(interval=interval, trimmed=False)\n # Necessary as we may be interested in different IRQ name\n if irqs:\n # Use longest IRQ\n start_ts = min(irqs, key=lambda x: x.interval.start).interval.start\n end_ts = start_ts\n post_ir_interval = Interval(start_ts, self._trace.duration)\n di_events = self.event_intervals(name=['deliverInputEvent', 'input'], interval=post_ir_interval)\n\n if di_events:\n # IMPORTANT: If InputDispatcher sythesizes multiple\n # events to same application, we ignore consequent event\n # and only parse 1st event. This is because we heuristically\n # can't determine start of next input event to differentiate.\n di_event = di_events[0]\n # necessary in case a synthetic events is cancelled\n # canceled appropriately when the events are no longer\n # being resynthesized (because the application or IME is\n # already handling them or dropping them entirely)\n # This is done by checking for dumping input latencies when\n # active input event queue length (aq) is > 1 for same task.\n\n # For more details, see\n # https://android.googlesource.com/platform/frameworks/base.git/+\n # /f9e989d5f09e72f5c9a59d713521f37d3fdd93dd%5E!/\n\n # This returns first interval when aq has pending event(s)\n di_event_name = getattr(di_event, 'name', None)\n if di_event_name and di_event_name == 'input':\n pfb_events = self.event_intervals(name='doComposition', interval=post_ir_interval)\n else: \n aq_event = filter_by_task(all_aq_events.slice(\n interval=post_ir_interval),\n 'pid', di_event.event.task.pid)\n \n if aq_event and aq_event.value > 0:\n post_di_start = aq_event.interval.start\n else:\n if aq_event:\n continue # if AQ event exists.\n post_di_start = di_events[0].interval.start\n \n print 'post_di_start = ' + str(post_di_start)\n post_di_interval = Interval(post_di_start,\n self._trace.duration)\n \n pfb_events = self.event_intervals(name='doComposition', interval=post_di_interval)\n \n if pfb_events:\n end_ts = pfb_events[0].interval.end\n if start_ts != end_ts and end_ts > start_ts and start_ts not in self._input_latencies._start_timestamps:\n input_interval = Interval(start=start_ts, end=end_ts)\n self._input_latencies.append(InputLatency(interval=input_interval,\n latency=input_interval.duration))\n\n return self._input_latencies",
"def handle_input(self, events):\n for event in events:\n if event.type == QUIT:\n sys.exit(0)\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n # pressing \"Q\" quits the card\n sys.exit(0)\n if event.key == pygame.K_m:\n # pressing \"M\" toggles the music\n if self.playing_music:\n pygame.mixer.music.pause()\n else:\n pygame.mixer.music.unpause()\n self.playing_music = not self.playing_music\n elif event.key == pygame.K_s:\n # pressing \"S\" toggles the snow\n if self.snowing:\n self.snow = []\n else:\n self.let_it_snow(self.intensity)\n self.snowing = not self.snowing",
"def ev_joydeviceremoved(self, event: tcod.event.JoystickDevice) -> T | None:",
"def handle(self, handler_input):\n speech = \"I'm a sample Alexa Skill. Let me give you a random Chuck Norris Fact. \"\n speech += getChuckFact()\n speech += \". Do you want more awesome Chuck facts?\"\n \n \"\"\"\n Take note of the set_should_end_session. If set to 'True', the alexa\n skill will gracefully end execution.AbstractExceptionHandler\n \n The set_card method specifies what kind of cards do you want to use when\n interacting with the user via display. A 'SimpleCard' display's text.\n \n For more info about cards, see:\n https://developer.amazon.com/docs/custom-skills/include-a-card-in-your-skills-response.html\n \"\"\"\n handler_input.response_builder.speak(speech).set_card(\n SimpleCard(speech)).set_should_end_session(False)\n return handler_input.response_builder.response",
"def handle_sensor_data(data):\n\n #Store incoming data in the Data object\n D.data = data\n\n #Check for a bump\n if data.bumpRight or data.bumpLeft:\n print \"Bumped!\"\n\n\n #Check if play button was pressed\t\n if data.play:\n\tprint \"Stopping...\"\n\tStateMachine.state_stop()\n\trospy.signal_shutdown(\"play button pressed\")\n\n #Check key presses\n key_press = cv.WaitKey(5) & 255\n if key_press != 255:\n \tcheck_key_press(D, key_press)\t\n\n #Display robot updates in Monitor window\n draw_on_image(D)",
"def handle(self, data):\n pass",
"def input(self):",
"def gen_input_handler(cf_man, cf_dat, cf_viewer):\n def keyinput(key):\n \"\"\"Switch between lists, save data and quit on key input.\"\"\"\n if key == 'meta q':\n raise urwid.ExitMainLoop()\n elif key == 'w':\n cf_dat.backup_files()\n cf_viewer.timed_msg(1, ': Saving file')\n cf_dat.write_config_file()\n elif key in ['right', 'tab']:\n if cf_viewer.cfg_pile.get_focus() == cf_man.cfg_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.opt_lb)\n elif cf_viewer.cfg_pile.get_focus() == cf_man.opt_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.imp_lb)\n else:\n cf_viewer.cfg_pile.set_focus(cf_man.cfg_lb)\n elif key in ['left', 'shift tab']:\n if cf_viewer.cfg_pile.get_focus() == cf_man.cfg_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.imp_lb)\n elif cf_viewer.cfg_pile.get_focus() == cf_man.opt_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.cfg_lb)\n else:\n cf_viewer.cfg_pile.set_focus(cf_man.opt_lb)\n\n return keyinput",
"def on_hid_pnp(self, hid_event = None):\r\n # keep old reference for UI updates\r\n old_device = self.device\r\n\r\n if hid_event:\r\n print(\"Hey, a hid device just %s!\" % hid_event)\r\n \r\n if hid_event == \"connected\":\r\n # test if our device is available\r\n if self.device:\r\n # see, at this point we could detect multiple devices!\r\n # but... we only want just one\r\n pass\r\n else:\r\n self.test_for_connection()\r\n elif hid_event == \"disconnected\":\r\n # the hid object is automatically closed on disconnection we just\r\n # test if still is plugged (important as the object might be\r\n # closing)\r\n if self.device and not self.device.is_plugged():\r\n self.device = None\r\n print(\"you removed my hid device!\")\r\n else:\r\n # poll for devices\r\n self.test_for_connection()\r\n\r\n if old_device != self.device:\r\n # update ui\r\n pass",
"def ev_textinput(self, event: TextInput) -> None:",
"def handle_read(self):\n pass",
"def process_event(event, device_id):\n print(event)\n if event.type == EventType.ON_CONVERSATION_TURN_STARTED:\n adjustvolume('30')\n subprocess.Popen([\"aplay\", \"/opt/RPIGassistant/audio-files/Listening.wav\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n GPIO.output(5,GPIO.HIGH)\n led.ChangeDutyCycle(100)\n print()\n\n if (event.type == EventType.ON_RESPONDING_STARTED and event.args and not event.args['is_error_response']):\n GPIO.output(5,GPIO.LOW)\n GPIO.output(6,GPIO.HIGH)\n led.ChangeDutyCycle(50)\n\n if event.type == EventType.ON_RESPONDING_FINISHED:\n GPIO.output(6,GPIO.LOW)\n GPIO.output(5,GPIO.HIGH)\n led.ChangeDutyCycle(100)\n print()\n\n if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT):\n say(random.choice(['sorry, i did not hear what you said', \n 'sorry, i did not hear anything', \n 'pardon', \n 'sorry, have you said something?']))\n restorevolume()\n print()\n\n if (event.type == EventType.ON_NO_RESPONSE):\n restorevolume()\n print()\n\n if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and\n event.args and not event.args['with_follow_on_turn']):\n restorevolume()\n GPIO.output(5,GPIO.LOW)\n led.ChangeDutyCycle(0)\n print()\n\n if event.type == EventType.ON_DEVICE_ACTION:\n for command, params in process_device_actions(event, device_id):\n print('Do command', command, 'with params', str(params))",
"def listen(self):\n while self.active:\n self.handle_input()",
"def device_event(observer, action, device):\n if (device.action == \"add\"):\n print(\"conectado\")\n name = device.sys_name\n print(name)\n print(name[len(name) - 4])\n if(name[len(name) - 4] == \":\"):\n print(\"Duplicado\")\n else:\n time.sleep(5)\n try:\n with open(\"/media/usb0/LABSD.txt\", \"r\") as f:\n data = f.readlines()\n except IOError:\n print('cannot open')\n else:\n dataprocess(data)\n f.close()\n elif (device.action == \"remove\"):\n print(\"desconectado\")\n else:\n print(\"error\")",
"def read(self, handler_name, rawdata):\r\n\r\n if handler_name in self.__events:\r\n data = self.__events[handler_name].handle(rawdata)\r\n if data.toflow() is not None:\r\n self.__to_flow.send(data.toflow())\r\n if data.todata() is not None:\r\n self.__to_data.send(data.todata())\r\n if data.toui() is not None:\r\n self.__to_ui.send(data.toui())\r\n else:\r\n raise MGPError('InputArea.read(): Tried to read for non-existing name \"{}\".'\r\n .format(handler_name))",
"def do_input_events(self):\r\n for event in EventStream.allNext(self.streams):\r\n if self.handler.event(event) and self.unhandledHandler:\r\n self.unhandledHandler(event)",
"def mic_input():\n try:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print('Say something...')\n r.pause_threshold = 1\n r.adjust_for_ambient_noise(source, duration=1)\n audio = r.listen(source)\n try:\n command = r.recognize_google(audio).lower()\n print('You said: ' + command + '\\n')\n except sr.UnknownValueError:\n print('....')\n command = self.mic_input()\n return command\n except Exception as e:\n print(e)\n return False",
"def event(self, event):\r\n ret = event\r\n self.mutex.acquire()\r\n\r\n try:\r\n Handled = False\r\n if event.eventType == EV_SYN:\r\n if self.synHandler:\r\n self.synHandler(event.stream.deviceType, event.stream.deviceIndex, event.eventCode, event.eventValue)\r\n ret = None\r\n elif event.eventType == EV_KEY:\r\n if event.stream.grabbed == False and event.eventValue != 0:\r\n ret = None\r\n self.buttons[event.eventCode] = event.eventValue\r\n if self.keyHandler:\r\n self.keyHandler(event.stream.deviceType, event.stream.deviceIndex, event.eventCode, event.eventValue)\r\n ret = None\r\n elif event.eventType == EV_REL:\r\n if event.eventCode == REL_X:\r\n self.relx[event.stream.deviceIndex] += event.eventValue\r\n if self.relHandler:\r\n self.relHandler(event.stream.deviceType, event.stream.deviceIndex, event.eventValue, 0, 0, 0)\r\n ret = None\r\n elif event.eventCode == REL_Y:\r\n self.rely[event.stream.deviceIndex] += event.eventValue\r\n if self.relHandler:\r\n self.relHandler(event.stream.deviceType, event.stream.deviceIndex, 0, event.eventValue, 0, 0)\r\n ret = None\r\n elif event.eventCode == REL_WHEEL:\r\n self.relv[event.stream.deviceIndex] += event.eventValue\r\n if self.relHandler:\r\n self.relHandler(event.stream.deviceType, event.stream.deviceIndex, 0, 0, event.eventValue, 0)\r\n ret = None\r\n elif event.eventCode == REL_HWHEEL:\r\n self.relh[event.stream.deviceIndex] += event.eventValue\r\n if self.relHandler:\r\n self.relHandler(event.stream.deviceType, event.stream.deviceIndex, 0, 0, 0, event.eventValue)\r\n elif event.eventCode == REL_DIAL:\r\n self.relh[event.stream.deviceIndex] += event.eventValue\r\n if self.relHandler:\r\n self.relHandler(event.stream.deviceType, event.stream.deviceIndex, 0 ,0, 0, event.eventValue)\r\n ret = None\r\n elif event.eventType == EV_ABS:\r\n if event.eventCode == ABS_X:\r\n Handled = True\r\n self.absx[event.stream.deviceIndex] = event.stream.scale(EventStream.axisX, event.eventValue)\r\n elif event.eventCode == ABS_Y:\r\n Handled = True\r\n self.absy[event.stream.deviceIndex] = event.stream.scale(EventStream.axisY, event.eventValue)\r\n elif event.eventCode == ABS_Z:\r\n Handled = True\r\n self.absz[event.stream.deviceIndex] = event.stream.scale(EventStream.axisZ, event.eventValue)\r\n elif event.eventCode == ABS_RX:\r\n Handled = True\r\n self.absx2[event.stream.deviceIndex] = event.stream.scale(EventStream.axisRX, event.eventValue)\r\n elif event.eventCode == ABS_RY:\r\n Handled = True\r\n self.absy2[event.stream.deviceIndex] = event.stream.scale(EventStream.axisRY, event.eventValue)\r\n elif event.eventCode == ABS_RZ:\r\n Handled = True\r\n self.absz2[event.stream.deviceIndex] = event.stream.scale(EventStream.axisRZ, event.eventValue)\r\n elif event.eventCode == ABS_HAT0X:\r\n Handled = True\r\n self.abshatx[event.stream.deviceIndex] = event.stream.scale(EventStream.axisHat0X, event.eventValue)\r\n elif event.eventCode == ABS_HAT0Y:\r\n Handled = True\r\n self.abshaty[event.stream.deviceIndex] = event.stream.scale(EventStream.axisHat0Y, event.eventValue)\r\n if Handled:\r\n if self.absHandler:\r\n self.absHandler(event.stream.deviceType, event.stream.deviceIndex,\r\n self.absx[event.stream.deviceIndex], self.absy[event.stream.deviceIndex], self.absz[event.stream.deviceIndex],\r\n self.absx2[event.stream.deviceIndex], self.absy2[event.stream.deviceIndex], self.absz2[event.stream.deviceIndex],\r\n self.abshatx[event.stream.deviceIndex], self.abshaty[event.stream.deviceIndex])\r\n ret = None\r\n finally:\r\n self.mutex.release()\r\n return 
ret",
"def input_(self, op):\n value = input(\"Enter your input: \")\n self.set_value(op.address, value, op.type_, op.is_global)",
"def _handle_event(event):\n if event.device.id_string != self._event.device.id_string:\n return\n\n self.apply_event(event)",
"def _handle_events(self):\n try:\n for event in self.device.read():\n if event.value == 2:\n # Key repeat event, we don't care\n continue\n mapped_cap = self.caps_mapping.get(event.type, {}).get(event.code, None)\n if event.value and mapped_cap:\n mapped_cap()\n except BlockingIOError:\n # Just means there's nothing to read at the moment\n pass\n\n self.device.set_led(evdev.ecodes.LED_NUML, self.pulse_handler.output_muted)\n self.device.set_led(evdev.ecodes.LED_CAPSL, self.pulse_handler.input_muted)\n\n return True # Needed to make GLib rerun the function on the next timeout",
"def stdin(self):\n pass",
"def get_input(self):\n pass",
"def ev_controllerdeviceremoved(self, event: tcod.event.ControllerDevice) -> T | None:",
"def handle_user_input(self,args):\n import ipcAPI,config\n\n message = self.get_message(args)\n try:\n client = ipcAPI.ipcClient(config.SOCKET_FILE)\n\n client.send_data(message)\n\n resp = client.recv_data()\n\n print(\"{} : {}\".format(resp['status'],resp['message']))\n except:\n print(\"Error while communicating with server\")\n return False\n \n return True",
"def set_input(self, input):\n pass",
"def set_input(self, input):\n pass",
"def read_and_process_input(self):\n try:\n # Force = 2. One to move the player, one to push an arbitrary item.\n self.player.move(get_option(movement_by_key), force=2)\n except MovementError:\n return",
"def read_input(inp):\n epoll = select.epoll()\n epoll.register(sys.stdin.fileno(), select.EPOLLIN)\n while inp.running:\n if is_terminated():\n return\n\n events = epoll.poll(1)\n for fileno, event in events:\n line = \"[\"\n while \"[\" in line:\n line = sys.stdin.readline().strip(\",\").strip()\n inp.has_event = True\n try:\n event = json.loads(line)\n if \"instance\" in event:\n inp.callback(event)\n inp.redraw()\n except ValueError:\n pass\n epoll.unregister(sys.stdin.fileno())\n epoll.close()\n inp.has_event = True\n inp.clean_exit = True",
"def handle_audio_input(message):\n def build_context(msg: Message):\n ctx = {'client_name': 'mycroft_listener',\n 'source': msg.context.get(\"source\" or \"speech_api\"),\n 'destination': [\"skills\"],\n \"audio_parser_data\": msg.context.get(\"audio_parser_data\"),\n \"client\": msg.context.get(\"client\"), # origin (local, klat, nano, mobile, api)\n \"neon_should_respond\": msg.context.get(\"neon_should_respond\"),\n \"username\": msg.context.get(\"username\"),\n \"timing\": {\"start\": msg.data.get(\"time\"),\n \"transcribed\": time.time()},\n \"ident\": msg.context.get(\"ident\", time.time())\n }\n if msg.context.get(\"klat_data\"):\n ctx[\"klat_data\"] = msg.context(\"klat_data\")\n ctx[\"nick_profiles\"] = msg.context.get(\"nick_profiles\")\n return ctx\n\n ident = message.context.get(\"ident\") or \"neon.audio_input.response\"\n wav_file_path = message.data.get(\"audio_file\")\n lang = message.data.get(\"lang\")\n try:\n _, parser_data, transcriptions = _get_stt_from_file(wav_file_path, lang)\n message.context[\"audio_parser_data\"] = parser_data\n context = build_context(message)\n data = {\n \"utterances\": transcriptions,\n \"lang\": message.data.get(\"lang\", \"en-us\")\n }\n handled = _emit_utterance_to_skills(Message('recognizer_loop:utterance', data, context))\n bus.emit(message.reply(ident, data={\"parser_data\": parser_data,\n \"transcripts\": transcriptions,\n \"skills_recv\": handled}))\n except Exception as e:\n LOG.error(e)\n bus.emit(message.reply(ident, data={\"error\": repr(e)}))",
"def setInput(self):\n gpio.setup(self.bcm_id, gpio.IN, pull_up_down=self.pull)\n self.mode = gpio.IN",
"def handle_incoming_data(self, connection, data):\n if data == 'quit':\n self.close_connection(connection)\n elif isinstance(data, tuple) and data[0] in ('login', 'register'):\n if data[1] in self.connections.values():\n self.send(connection, self.auth.already_logged())\n else:\n auth_response = self.auth.identify_user(*data)\n if auth_response['flag']:\n self.connections[connection] = data[1]\n self.send(connection, auth_response)\n elif isinstance(data, str) and data.startswith('@'):\n self.route(connection, data)\n else:\n self.broadcast(connection, data)",
"def modify_input(self, raw_input_par):\r\n raise NotImplementedError",
"def modify_input(self, raw_input_par):\r\n raise NotImplementedError",
"def listen(self):\n\n if not self.key_data:\n self.key_data = {}\n for i in range(1024):\n self.key_data[i] = False\n\n if not self.axis_data:\n self.axis_data = {}\n for i in range(self.controller.get_numaxes()):\n self.axis_data[i] = 0.0\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n debug_toggle = True\n print_state_toggle = True\n\n # These parameters define how frequesnt speed setting sent over serial to arduino\n speed_threshold = 10.0 # sets update threshold\n speed_step = 1 # sets acceleration\n speed_delay = 0.01 # delay per 1 step in sec\n\n mode_switch = \"j\" # control mode: k - keyboard, j - joystick\n\n # Parameters for keyboard control mode\n speed = 0.0\n speed_current = 0\n direction = \"r\" # r - release, f - forward, b - backward\n direction_current = \"r\"\n\n # Parameters for joystick control mode\n speed_l = 0\n speed_r = 0\n prev_speed_l = 0\n prev_speed_r = 0\n prev_btn = False\n\n while True:\n prev = self.axis_data\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n self.key_data[event.key] = True\n elif event.type == pygame.KEYUP:\n self.key_data[event.key] = False\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value,2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n # check for exit command\n if self.button_data[9] or self.key_data[pygame.QUIT] or self.key_data[pygame.K_ESCAPE]:\n pygame.quit()\n break\n\n # toggle debug\n if self.key_data[pygame.K_d]:\n if debug_toggle:\n print(\"Toggle debug\")\n self.ser.write(b'd')\n debug_toggle = False\n else:\n debug_toggle = True\n\n # print out motors status\n if self.key_data[pygame.K_p]:\n if print_state_toggle:\n self.ser.write(b'p')\n if self.ser.in_waiting:\n print (self.ser.readline())\n print_state_toggle = False\n else:\n print_state_toggle = True\n\n if self.key_data[pygame.K_1] and mode_switch != \"k\":\n mode_switch = \"k\"\n\n if self.key_data[pygame.K_2] and mode_switch != \"j\":\n print(\"Joystick mode: ON\")\n mode_switch = \"j\"\n\n if mode_switch == \"k\": # keyboard control mode\n # accelearte forward\n if self.key_data[pygame.K_a] and direction != \"r\":\n if speed < 255.0:\n speed = speed + speed_step\n sleep(speed_delay)\n # accelerate backward\n if self.key_data[pygame.K_z] and direction != \"r\":\n if speed > 0.0:\n speed = speed - speed_step\n sleep(speed_delay)\n\n if self.key_data[pygame.K_UP] and direction != \"f\":\n direction = \"f\"\n if self.key_data[pygame.K_DOWN] and direction != \"b\":\n direction = \"b\"\n if self.key_data[pygame.K_UP] == False and direction == \"f\":\n direction = \"r\"\n if self.key_data[pygame.K_DOWN] == False and direction == \"b\":\n direction = \"r\"\n\n if math.fabs(speed - speed_current) > speed_threshold or direction != direction_current:\n # print(\"{0}, {1}, {2}, {3}\".format(speed, speed_current, direction, direction_current))\n direction_current = direction\n if direction == \"r\":\n speed = 0.0\n speed_current = int(speed)\n str_r = \"sr\" + direction_current + str(speed_current) + \"e\"\n str_l = \"sl\" + direction_current + str(speed_current) + \"e\"\n print(str_l)\n print(str_r)\n 
self.ser.write(str_r.encode())\n self.ser.write(str_l.encode())\n\n if(self.key_data[pygame.K_LEFT]):\n str_rf = \"srf\" + str(speed_current) + \"e\"\n self.ser.write(str_rf.encode())\n str_lf = \"slf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_lf.encode())\n elif(self.key_data[pygame.K_RIGHT]):\n str_rb = \"srf\" + str(int(speed_current*0.9)) + \"e\"\n self.ser.write(str_rb.encode())\n str_lb = \"slf\" + str(speed_current) + \"e\"\n self.ser.write(str_lb.encode())\n\n if (self.key_data[pygame.K_UP] == False and self.key_data[pygame.K_DOWN] == False) and (self.key_data[pygame.K_a] == False and self.key_data[pygame.K_z] == False):\n speed = 0\n speed_current = speed\n direction = \"r\"\n direction_current = direction\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n if mode_switch == \"j\": # joystick control mode\n if self.ser.in_waiting:\n data = str(self.ser.readline().strip())\n data = data[2 :len(data)-1]\n print(data)\n #self.aio.send('Team Hacky Slackers', data)\n\n prev_speed_l = speed_l\n prev_speed_r = speed_r\n speed_threshold = 1\n\n #simplified linear mapping for controller\n speed_l = int((self.axis_data[0]*(-50)) + 90)\n speed_r = int(math.fabs(self.axis_data[3]*255))\n #print(self.axis_data)\n #print(\"curr_l: {0}, perv_l: {1}, curr_r:{2}, perv_r:{3}\".format(speed_l, prev_speed_l, speed_r,prev_speed_r))\n\n if self.axis_data[0] < -0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lf = \"slf\" + str(speed_l) + \"e\"\n self.ser.write(str_lf.encode())\n elif self.axis_data[0] > 0.05 and math.fabs(speed_l - prev_speed_l) > speed_threshold:\n str_lb = \"slb\" + str(speed_l) + \"e\"\n self.ser.write(str_lb.encode())\n\n\n if self.axis_data[3] < -0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rf = \"srf\" + str(speed_r) + \"e\"\n self.ser.write(str_rf.encode())\n elif self.axis_data[3] > 0.03 and math.fabs(speed_r - prev_speed_r) > speed_threshold:\n str_rb = \"srb\" + str(speed_r) + \"e\"\n self.ser.write(str_rb.encode())\n\n if ( self.axis_data[0] >= -0.05 and self.axis_data[0] <= 0.05 ) and ( self.axis_data[3] >= -0.05 and self.axis_data[3] <= 0.05 ):\n speed_l = 90\n speed_r = 0\n self.ser.write(b'srze')\n self.ser.write(b'slze')\n\n #Logic to call RFID scan only once per click of R1 button\n # if(prev_btn != self.button_data[5]):\n # prev_btn = self.button_data[5]\n # if self.button_data[5] :\n # print(\"Scanning for RFID Card\")\n # self.ser.write(\"i\".encode())\n\n # clear()\n # pprint.pprint(self.button_data)\n # pprint.pprint(self.axis_data)\n # pprint.pprint(self.hat_data)",
"def __process_input(self, input_):\n if self.state.game_over:\n if input_.key_pressed:\n self.state.exit = True\n else:\n if input_.action == 'PLAYER_UP':\n self.state.player.direction = 'U'\n elif input_.action == 'PLAYER_DOWN':\n self.state.player.direction = 'D'\n elif input_.action == 'PLAYER_LEFT':\n self.state.player.direction = 'L'\n elif input_.action == 'PLAYER_RIGHT':\n self.state.player.direction = 'R'",
"def handle(text, mic, profile):\n os.system('if [ `cat /sys/class/gpio/gpio18/value` -eq \"0\" ]; then echo \"1\" > /sys/class/gpio/gpio18/value; else echo \"0\" > /sys/class/gpio/gpio18/value; fi')",
"def event_handler(self, event: dict) -> None:\n if (event_type := event[\"e\"]) not in (EVENT_ADDED, EVENT_CHANGED):\n LOGGER.debug(\"Unsupported event %s\", event)\n return\n\n if (resource_type := event[\"r\"]) not in (\n GROUP_RESOURCE,\n LIGHT_RESOURCE,\n SENSOR_RESOURCE,\n ):\n LOGGER.debug(\"Unsupported resource %s\", event)\n return\n\n device_class = getattr(self, resource_type)\n device_id = event[\"id\"]\n\n if event_type == EVENT_CHANGED and device_id in device_class:\n device_class.process_raw({device_id: event})\n if resource_type == LIGHT_RESOURCE and \"attr\" not in event:\n self.update_group_color([device_id])\n return\n\n if event_type == EVENT_ADDED and device_id not in device_class:\n device_class.process_raw({device_id: event[resource_type[:-1]]})\n device = device_class[device_id]\n if self.async_add_device_callback:\n self.async_add_device_callback(resource_type, device)\n return",
"def handle_input(sock):\n\tprint(\"Type message, enter to send. 'q' to quit\")\n\twhile True:\n\t\tmsg = input() #Blocks\n\t\tif msg == 'q':\n\t\t\tprint('Shut Down Client')\n\t\t\tsock.shutdown(socket.SHUT_RDWR)\n\t\t\tsock.close()\n\t\t\tbreak\n\t\ttry:\n\t\t\ttincanchat.send_msg(sock,msg) #Blocks until sent\n\t\texcept(BrokenPipeError,ConnectionError):\n\t\t\tbreak",
"def Demo():\n args = _Parse()\n device = args.device.lower()\n if device == 'keyboard':\n DemoBluetoothHIDKeyboard(args.remote_host_address, args.chars_to_send)\n elif device == 'mouse':\n DemoBluetoothHIDMouse(args.remote_host_address)\n else:\n args.print_help()",
"def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")",
"def process_input(self,r,g,b):\n pass",
"def create_device(self, layout):\n events = {ecodes.EV_ABS: [], ecodes.EV_KEY: [],\n ecodes.EV_REL: []}\n\n # Joystick device\n if layout.axes or layout.buttons or layout.hats:\n self.joystick_dev = next_joystick_device()\n\n for name in layout.axes:\n params = layout.axes_options.get(name, DEFAULT_AXIS_OPTIONS)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.hats:\n params = (0, -1, 1, 0, 0)\n if not absInfoUsesValue:\n params = params[1:]\n events[ecodes.EV_ABS].append((name, params))\n\n for name in layout.buttons:\n events[ecodes.EV_KEY].append(name)\n\n if layout.mouse:\n self.mouse_pos = {}\n self.mouse_rel = {}\n self.mouse_analog_sensitivity = float(\n layout.mouse_options.get(\"MOUSE_SENSITIVITY\",\n DEFAULT_MOUSE_SENSITIVTY)\n )\n self.mouse_analog_deadzone = int(\n layout.mouse_options.get(\"MOUSE_DEADZONE\",\n DEFAULT_MOUSE_DEADZONE)\n )\n self.scroll_repeat_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_REPEAT_DELAY\",\n DEFAULT_SCROLL_REPEAT_DELAY)\n )\n self.scroll_delay = float(\n layout.mouse_options.get(\"MOUSE_SCROLL_DELAY\",\n DEFAULT_SCROLL_DELAY)\n )\n\n for name in layout.mouse:\n if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):\n if ecodes.REL_WHEEL not in events[ecodes.EV_REL]:\n # This ensures that scroll wheel events can work\n events[ecodes.EV_REL].append(ecodes.REL_WHEEL)\n else:\n events[ecodes.EV_REL].append(name)\n self.mouse_rel[name] = 0.0\n\n self.device = UInput(name=layout.name, events=events,\n bustype=layout.bustype, vendor=layout.vendor,\n product=layout.product, version=layout.version)\n self.layout = layout",
"def handle_user_interface_buffer(self) -> None:\n user_interface_buffer = self.user_interface.buffer\n for command in user_interface_buffer:\n if command.lower() == 'register':\n self.handle_register_command()\n elif command.lower() == 'advertise':\n self.handle_advertise_command()\n elif command.lower().startswith('sendmessage'):\n self.handle_message_command(command)\n else:\n log('Are you on drugs?')\n self.user_interface.clear_buffer()",
"def do(self, argin):\n\n device = self.target\n\n # Code here\n device._corr_config = []\n device._pss_config = []\n device._pst_config = []\n device._corr_fsp_list = []\n device._pss_fsp_list = []\n device._pst_fsp_list = []\n device._fsp_list = [[], [], [], []]\n\n # validate scan configuration first \n try:\n device._validate_scan_configuration(argin)\n except tango.DevFailed as df:\n self.logger.error(str(df.args[0].desc))\n self.logger.warn(\"validate scan configuration error\")\n # device._raise_configure_scan_fatal_error(msg)\n\n # Call this just to release all FSPs and unsubscribe to events. \n # Can't call GoToIdle, otherwise there will be state transition problem. \n # TODO - to clarify why can't call GoToIdle\n device._deconfigure()\n\n # TODO - to remove\n # data = tango.DeviceData()\n # data.insert(tango.DevUShort, ObsState.CONFIGURING)\n # device._group_vcc.command_inout(\"SetObservingState\", data)\n\n full_configuration = json.loads(argin)\n common_configuration = copy.deepcopy(full_configuration[\"common\"])\n configuration = copy.deepcopy(full_configuration[\"cbf\"])\n # set band5Tuning to [0,0] if not specified\n if \"band_5_tuning\" not in common_configuration: \n common_configuration[\"band_5_tuning\"] = [0,0]\n\n # Configure configID.\n device._config_ID = str(common_configuration[\"config_id\"])\n\n # Configure frequencyBand.\n frequency_bands = [\"1\", \"2\", \"3\", \"4\", \"5a\", \"5b\"]\n device._frequency_band = frequency_bands.index(common_configuration[\"frequency_band\"])\n\n config_dict = { \"config_id\": common_configuration[\"config_id\"], \n \"frequency_band\": common_configuration[\"frequency_band\"] }\n json_str = json.dumps(config_dict)\n data = tango.DeviceData()\n data.insert(tango.DevString, json_str)\n device._group_vcc.command_inout(\"ConfigureScan\", data)\n\n # TODO: all these VCC params should be passed in via ConfigureScan()\n # Configure band5Tuning, if frequencyBand is 5a or 5b.\n if device._frequency_band in [4, 5]:\n stream_tuning = [*map(float, common_configuration[\"band_5_tuning\"])]\n device._stream_tuning = stream_tuning\n device._group_vcc.write_attribute(\"band5Tuning\", stream_tuning)\n\n # Configure frequencyBandOffsetStream1.\n if \"frequency_band_offset_stream_1\" in configuration:\n device._frequency_band_offset_stream_1 = int(configuration[\"frequency_band_offset_stream_1\"])\n else:\n device._frequency_band_offset_stream_1 = 0\n log_msg = \"'frequencyBandOffsetStream1' not specified. Defaulting to 0.\"\n self.logger.warn(log_msg)\n device._group_vcc.write_attribute(\"frequencyBandOffsetStream1\", device._frequency_band_offset_stream_1)\n\n # Validate frequencyBandOffsetStream2.\n # If not given, use a default value.\n # If malformed, use a default value, but append an error.\n if \"frequency_band_offset_stream_2\" in configuration:\n device._frequency_band_offset_stream_2 = int(configuration[\"frequency_band_offset_stream_2\"])\n else:\n device._frequency_band_offset_stream_2 = 0\n log_msg = \"'frequencyBandOffsetStream2' not specified. 
Defaulting to 0.\"\n self.logger.warn(log_msg)\n device._group_vcc.write_attribute(\"frequencyBandOffsetStream2\", device._frequency_band_offset_stream_2)\n\n # Configure dopplerPhaseCorrSubscriptionPoint.\n if \"doppler_phase_corr_subscription_point\" in configuration:\n attribute_proxy = tango.AttributeProxy(configuration[\"doppler_phase_corr_subscription_point\"])\n attribute_proxy.ping()\n event_id = attribute_proxy.subscribe_event(\n tango.EventType.CHANGE_EVENT,\n device._doppler_phase_correction_event_callback\n )\n device._events_telstate[event_id] = attribute_proxy\n\n # Configure delayModelSubscriptionPoint.\n if \"delay_model_subscription_point\" in configuration:\n device._last_received_delay_model = \"{}\"\n attribute_proxy = tango.AttributeProxy(configuration[\"delay_model_subscription_point\"])\n attribute_proxy.ping() #To be sure the connection is good(don't know if the device is running)\n event_id = attribute_proxy.subscribe_event(\n tango.EventType.CHANGE_EVENT,\n device._delay_model_event_callback\n )\n device._events_telstate[event_id] = attribute_proxy\n\n # Configure jonesMatrixSubscriptionPoint\n if \"jones_matrix_subscription_point\" in configuration:\n device._last_received_jones_matrix = \"{}\"\n attribute_proxy = tango.AttributeProxy(configuration[\"jones_matrix_subscription_point\"])\n attribute_proxy.ping()\n event_id = attribute_proxy.subscribe_event(\n tango.EventType.CHANGE_EVENT,\n device._jones_matrix_event_callback\n )\n device._events_telstate[event_id] = attribute_proxy\n\n # Configure beamWeightsSubscriptionPoint\n if \"timing_beam_weights_subscription_point\" in configuration:\n device._last_received_beam_weights= \"{}\"\n attribute_proxy = tango.AttributeProxy(configuration[\"timing_beam_weights_subscription_point\"])\n attribute_proxy.ping()\n event_id = attribute_proxy.subscribe_event(\n tango.EventType.CHANGE_EVENT,\n device._beam_weights_event_callback\n )\n device._events_telstate[event_id] = attribute_proxy\n\n # Configure rfiFlaggingMask.\n if \"rfi_flagging_mask\" in configuration:\n device._group_vcc.write_attribute(\n \"rfiFlaggingMask\",\n json.dumps(configuration[\"rfi_flagging_mask\"])\n )\n else:\n log_msg = \"'rfiFlaggingMask' not given. 
Proceeding.\"\n self.logger.warn(log_msg)\n\n # Configure searchWindow.\n if \"search_window\" in configuration:\n for search_window in configuration[\"search_window\"]:\n search_window[\"frequency_band\"] = common_configuration[\"frequency_band\"]\n search_window[\"frequency_band_offset_stream_1\"] = \\\n device._frequency_band_offset_stream_1\n search_window[\"frequency_band_offset_stream_2\"] = \\\n device._frequency_band_offset_stream_2\n if search_window[\"frequency_band\"] in [\"5a\", \"5b\"]:\n search_window[\"band_5_tuning\"] = common_configuration[\"band_5_tuning\"]\n # pass on configuration to VCC\n data = tango.DeviceData()\n data.insert(tango.DevString, json.dumps(search_window))\n device._group_vcc.command_inout(\"ConfigureSearchWindow\", data)\n else:\n log_msg = \"'searchWindow' not given.\"\n self.logger.warn(log_msg)\n\n # TODO: the entire vcc configuration should move to Vcc\n # for now, run ConfigScan only wih the following data, so that\n # the obsState are properly (implicitly) updated by the command\n # (And not manually by SetObservingState as before)\n\n ######## FSP #######\n # Configure FSP.\n for fsp in configuration[\"fsp\"]:\n # Configure fspID.\n fspID = int(fsp[\"fsp_id\"])\n proxy_fsp = device._proxies_fsp[fspID - 1]\n\n device._group_fsp.add(device._fqdn_fsp[fspID - 1])\n device._group_fsp_corr_subarray.add(device._fqdn_fsp_corr_subarray[fspID - 1])\n device._group_fsp_pss_subarray.add(device._fqdn_fsp_pss_subarray[fspID - 1])\n device._group_fsp_pss_subarray.add(device._fqdn_fsp_pst_subarray[fspID - 1])\n\n # change FSP subarray membership\n proxy_fsp.AddSubarrayMembership(device._subarray_id)\n\n # Configure functionMode.\n proxy_fsp.SetFunctionMode(fsp[\"function_mode\"])\n\n # subscribe to FSP state and healthState changes\n event_id_state, event_id_health_state = proxy_fsp.subscribe_event(\n \"State\",\n tango.EventType.CHANGE_EVENT,\n device._state_change_event_callback\n ), proxy_fsp.subscribe_event(\n \"healthState\",\n tango.EventType.CHANGE_EVENT,\n device._state_change_event_callback\n )\n device._events_state_change_fsp[int(fsp[\"fsp_id\"])] = [event_id_state,\n event_id_health_state]\n \n # Add configID to fsp. 
It is not included in the \"FSP\" portion in configScan JSON\n fsp[\"config_id\"] = common_configuration[\"config_id\"]\n fsp[\"frequency_band\"] = common_configuration[\"frequency_band\"]\n fsp[\"band_5_tuning\"] = common_configuration[\"band_5_tuning\"]\n fsp[\"frequency_band_offset_stream_1\"] = device._frequency_band_offset_stream_1\n fsp[\"frequency_band_offset_stream_2\"] = device._frequency_band_offset_stream_2\n\n if fsp[\"function_mode\"] == \"CORR\":\n if \"receptor_ids\" not in fsp:\n # TODO In this case by the ICD, all subarray allocated resources should be used.\n fsp[\"receptor_ids\"] = [device._receptors[0]]\n device._corr_config.append(fsp)\n device._corr_fsp_list.append(fsp[\"fsp_id\"])\n \n # TODO currently only CORR function mode is supported outside of Mid.CBF MCS\n elif fsp[\"function_mode\"] == \"PSS-BF\":\n for searchBeam in fsp[\"search_beam\"]:\n if \"receptor_ids\" not in searchBeam:\n # In this case by the ICD, all subarray allocated resources should be used.\n searchBeam[\"receptor_ids\"] = device._receptors\n device._pss_config.append(fsp)\n device._pss_fsp_list.append(fsp[\"fsp_id\"])\n elif fsp[\"function_mode\"] == \"PST-BF\":\n for timingBeam in fsp[\"timing_beam\"]:\n if \"receptor_ids\" not in timingBeam:\n # In this case by the ICD, all subarray allocated resources should be used.\n timingBeam[\"receptor_ids\"] = device._receptors\n device._pst_config.append(fsp)\n device._pst_fsp_list.append(fsp[\"fsp_id\"])\n\n # Call ConfigureScan for all FSP Subarray devices (CORR/PSS/PST)\n\n # NOTE:_corr_config is a list of fsp config JSON objects, each \n # augmented by a number of vcc-fsp common parameters \n # created by the function _validate_scan_configuration()\n if len(device._corr_config) != 0: \n #device._proxy_corr_config.ConfigureFSP(json.dumps(device._corr_config))\n # Michelle - WIP - TODO - this is to replace the call to \n # _proxy_corr_config.ConfigureFSP()\n for this_fsp in device._corr_config:\n try: \n this_proxy = device._proxies_fsp_corr_subarray[int(this_fsp[\"fsp_id\"])-1]\n this_proxy.ConfigureScan(json.dumps(this_fsp))\n except tango.DevFailed:\n msg = \"An exception occurred while configuring \" \\\n \"FspCorrSubarray; Aborting configuration\"\n device._raise_configure_scan_fatal_error(msg)\n\n # NOTE: _pss_config is costructed similarly to _corr_config\n if len(device._pss_config) != 0:\n for this_fsp in device._pss_config:\n try:\n this_proxy = device._proxies_fsp_pss_subarray[int(this_fsp[\"fsp_id\"])-1]\n this_proxy.ConfigureScan(json.dumps(this_fsp))\n except tango.DevFailed:\n msg = \"An exception occurred while configuring \" \\\n \"FspPssSubarray; Aborting configuration\"\n device._raise_configure_scan_fatal_error(msg)\n\n # NOTE: _pst_config is costructed similarly to _corr_config\n if len(device._pst_config) != 0:\n for this_fsp in device._pst_config:\n try:\n this_proxy = device._proxies_fsp_pst_subarray[int(this_fsp[\"fsp_id\"])-1]\n this_proxy.ConfigureScan(json.dumps(this_fsp))\n except tango.DevFailed:\n msg = \"An exception occurred while configuring \" \\\n \"FspPstSubarray; Aborting configuration\"\n device._raise_configure_scan_fatal_error(msg)\n\n # TODO add VLBI to this once they are implemented\n # what are these for?\n device._fsp_list[0].append(device._corr_fsp_list)\n device._fsp_list[1].append(device._pss_fsp_list)\n device._fsp_list[2].append(device._pst_fsp_list)\n\n #save configuration into latestScanConfig\n device._latest_scan_config = str(configuration)\n message = \"CBFSubarray Configure command completed 
OK\"\n self.logger.info(message)\n return (ResultCode.OK, message)",
"def ev_textinput(self, event: tcod.event.TextInput) -> T | None:"
] |
[
"0.69072264",
"0.6673858",
"0.6624353",
"0.6555153",
"0.6445983",
"0.6402754",
"0.6364852",
"0.6353404",
"0.6157712",
"0.6112686",
"0.6071804",
"0.606225",
"0.60263073",
"0.59862846",
"0.5978435",
"0.5901954",
"0.5878921",
"0.5858231",
"0.5857707",
"0.5841894",
"0.582443",
"0.5819965",
"0.58034754",
"0.5803119",
"0.57978183",
"0.578216",
"0.5780776",
"0.5780016",
"0.57560444",
"0.5755917",
"0.5739044",
"0.57384145",
"0.5718915",
"0.5702409",
"0.56932503",
"0.56811243",
"0.5670983",
"0.566672",
"0.5613324",
"0.56038445",
"0.5592659",
"0.55873096",
"0.55849314",
"0.5578027",
"0.5577159",
"0.5574318",
"0.5570943",
"0.5563371",
"0.556247",
"0.5532644",
"0.55202997",
"0.5517158",
"0.551568",
"0.54962987",
"0.54737437",
"0.54703885",
"0.5468459",
"0.5456506",
"0.54560846",
"0.545428",
"0.54506135",
"0.54374063",
"0.5428891",
"0.5418316",
"0.5414147",
"0.54136324",
"0.5404659",
"0.5402019",
"0.5401413",
"0.5396791",
"0.53903514",
"0.53818005",
"0.5380609",
"0.5379845",
"0.5374988",
"0.5374326",
"0.53618515",
"0.53204757",
"0.5318917",
"0.5317265",
"0.5314883",
"0.5314883",
"0.53148305",
"0.53134555",
"0.5308691",
"0.5306806",
"0.53067076",
"0.5301349",
"0.5301349",
"0.52997774",
"0.5286228",
"0.52844477",
"0.52695394",
"0.52669656",
"0.5263766",
"0.5258175",
"0.5254363",
"0.52515775",
"0.5251541",
"0.5247307",
"0.52442354"
] |
0.0
|
-1
|
Prepares session queue for communication
|
def queue_communication(self, session):
# Here we can queue all communication to be sent to the Client
# Examples follow...
session['queue'].append(GetObjects())
session['queue'].append(DeleteObjects())
session['queue'].append(RpcExecute())
session['queue'].append(GetDeviceInfo())
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _load_session(self):\n if not self.session.container_id:\n self.start()\n self._check_fifo()\n self._update_container()",
"def init_session(self):\n pass",
"def init_session(self):\n pass",
"def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"screen-length 0 temporary\")\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()",
"def add_nb_queue_to_session_queue(self, session):\n rpc_list = []\n client_id = get_element('cid', session['client'])\n\n if client_id is not None and client_id in RPCS.Northbound_Queue:\n # Check if all commands have been serviced\n if RPCS.Northbound_Queue[client_id]:\n # Get first request in the client queue, in the form:\n # (Client_COMMAND, RESPONSE STREAM)\n # TODO pop might be unresolved\n nb_request = RPCS.Northbound_Queue[client_id].pop(0)\n # Parse and queue request(s)\n client_command = nb_request[0]\n rpc_list.append(client_command)\n # Insert nb commands to the front of queue\n session['queue'] = queued_nb_methods + session['queue']\n # Store stream which expects the client response in the session\n session['nb_response_stream'] = nb_request[1]",
"def init_email_queue():\n g.setdefault('email_queue', [])",
"def __init__(self, session_key):\n super(Talk, self).__init__()\n self.session_key = session_key\n self.tcp_clients = []\n self.participants = []\n if self.session_key in Talk.talk_sessions:\n raise NameError(\"There already exists a session with that number\")\n Talk.talk_sessions[self.session_key] = self\n self.action_queue = Queue.Queue()\n self.talk_queue = Queue.Queue()\n self.udp_server = None",
"def __init__(self):\r\n self.queue = []",
"def __init__(self):\r\n self.queue = []",
"def session_preparation(self) -> None:\n self._test_channel_read(pattern=r\">\")\n self.set_base_prompt()\n self.disable_paging(command=\"set length 0\")",
"def __init__(self):\n self._queue_items = []",
"def __init__(self): \n self.queue = []",
"def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)",
"def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)",
"def preProcess(self, msg):\n\n # open connection\n self.conn = Connection(\n user=self.user, password=self.password,\n vhost=self.vhost, host=self.host,\n heartbeat=self.heartbeat, debug=self.debug)\n\n # create AMQP channel\n self.channel = self.conn.channel()\n self.channel.exchange.declare(self.exchange, self.exchange_type)\n self.channel.queue.declare(self.queue, self.auto_delete)\n self.channel.queue.bind(self.queue, self.exchange, self.routing_key)",
"def _create_incoming_queue(self):\n\n connection = self.connection\n session = self.connection.session\n uuid = str(qpid.datatypes.uuid4())\n\n incoming = session.incoming(uuid)\n session.message_subscribe(\n queue=self.name,\n destination=uuid,\n )\n\n try:\n yield incoming\n finally:\n try:\n incoming.stop()\n except connection.backend.connection_errors:\n pass\n with session.lock:\n try:\n del session._incoming[uuid]\n except KeyError:\n pass",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def __init__(self):\n self.queue = []",
"def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"session paginate disable\")\n self.set_terminal_width(command='terminal width 511')",
"def __init__(self):\n self.queues=[]",
"async def start_session(self):\n\t\t...",
"def session_preparation(self):\n self.ansi_escape_codes = True\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"terminal datadump\")\n\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()",
"def __init__(self) -> None:\n self._queue = []",
"def prepare_invoking(self, requests_session):\n raise NotImplementedError",
"def session_preparation(self):\n # 0 will defer to the global delay factor\n delay_factor = self.select_delay_factor(delay_factor=0)\n self._test_channel_read()\n self.set_base_prompt()\n cmd = f\"{self.RETURN}set cli mode -page OFF{self.RETURN}\"\n self.disable_paging(command=cmd)\n time.sleep(1 * delay_factor)\n self.set_base_prompt()\n time.sleep(0.3 * delay_factor)\n self.clear_buffer()",
"def startSession(self):\n self.storage.insert(self.__json__())",
"def prepare_for_session(self, session_pipeline):\n\n self._aprs_service._active_session_pipeline = session_pipeline # Used to load live_craft_position service\n self._aprs_service.start_tracker()",
"def __init__(self, session: aiowamp.SessionABC) -> None:\n self.session = session\n self.id_gen = IDGenerator()\n\n self.__awaiting_reply = {}\n self.__ongoing_calls = {}\n self.__running_procedures = {}\n\n self.__procedure_ids = {}\n self.__procedures = {}\n\n self.__sub_ids = {}\n self.__sub_handlers = {}\n\n self.session.add_message_handler(self.__handle_message)",
"def before_request(self):\n g.start_time = dt.datetime.now()\n if 'UUID' not in session.keys() or not self.redis.zrank(SORTED_SESSION_LIST, session['UUID']):\n _uuid = session.get('UUID', default=str(uuid.uuid4()))\n session['UUID'] = _uuid\n s = dict(\n user_agent=request.user_agent.string,\n ua_browser=request.user_agent.browser,\n ua_language=request.user_agent.language,\n ua_platform=request.user_agent.platform,\n ua_version=request.user_agent.version,\n )\n self.store_session(_uuid, s)",
"def __init__(self):\n self.queue = Queue()",
"def prepareflow(self):\r\n self.time = 0\r\n #self.timeout = timeout\r\n Dummy = things(0,0,0)\r\n Dummy.threadqueue.append(-1)\r\n Dummy.s=self\r\n Dummy.name = 'End of simulation.'\r\n heappush(self.queue, (self.timeout, (Dummy, 1000)))\r\n #while true:\r\n #(now, (item, i)) = heappop(self.queue)\r\n #if now >= timeout: break\r",
"def __init__(self):\r\n self.queue = []\r\n self.current = False",
"def prepare_acquisition(self):\n self.lib.PrepareAcquisition()",
"def __init__(self, queue, usercallback):\n self.queue = queue\n self.usercallback = usercallback",
"def _push_queue(self):\n\n self.add_cons_vars(self._var_queue, sloppy=self.sloppy)\n self.add_cons_vars(self._cons_queue, sloppy = self.sloppy)\n\n if len(self._var_queue) > 0:\n self.regenerate_variables()\n if len(self._cons_queue) > 0:\n self.regenerate_constraints()\n\n self._var_queue = list()\n self._cons_queue = list()",
"def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover",
"def __init__(self):\n self.messageSet = set()\n self.messageQueue = deque()",
"def initiate_session(self):\n # this will run a infinite loop simulating a user using the application. Timeout = 30min\n\n self.push_to_kafka('open app')\n\n # session duration time\n session_time = random.randint(500, 3000) \n logging.info('Profile session: {} --> SESSION TIME: {}'.format(self.profile['username'], session_time))\n\n\n start_time = time.time()\n while self.elepsed_session_time < session_time: \n #logging.info('Elepsed session time: {} from {} '.format(str(round(self.elepsed_session_time)), session_time))\n #for _ in range(10):\n # self.play_song()\n\n if self.application_state == 'STOPPED':\n self.play_song()\n elif self.application_state == 'PLAYING':\n song_elapsed_time = self.elepsed_session_time - self.song_started_at\n # logging.info('Song: {} - Elepsed song time: {} from {} - Percentage? - {} || Session duration: {}'.format(self.current_song['name'],\n # round(song_elapsed_time), \n # self.current_song['duration_ms']/1000,\n # self.listen_song_percentage,\n # session_time))\n if song_elapsed_time >= (self.current_song['duration_ms']/1000) * self.listen_song_percentage:\n self.change_song()\n \n\n \n time.sleep(0.3)\n self.elepsed_session_time = time.time() - start_time \n \n # finished session\n if self.application_state == 'PLAYING':\n self.stop_song()\n\n self.close_session()\n\n return",
"def setup_queue(self):\n self.logger.info('declaring queue %s', self.queue)\n if self.otq:\n self._channel.queue_declare(self.on_queue_declareok, self.queue, auto_delete=True)\n else:\n self._channel.queue_declare(self.on_queue_declareok, self.queue)",
"def __init__(self):\n Session.SESSIONS_COUNT += 1\n self.channelCount = 0\n self._channels = []\n self._error = ''\n self._client = None",
"def perform_setup():\n global credentials, connection, channel\n credentials = pika.PlainCredentials('guest', 'guest') # AUTH via Default guest user on RabbitMQ\n connection = pika.BlockingConnection(pika.ConnectionParameters(\"127.0.0.1\", 5672, '/', credentials)) # Using rabbit-mq container name to access the RabbitMQ container from other containers\n channel = connection.channel()\n channel.queue_declare(queue='poll', durable=True)",
"def init_session(self):\n self._session = requests.Session()",
"def on_session_started(session_started_request, session):",
"def __init__(self, session):\n self.session = session",
"def on_init(self, queue=None, **kwargs):\n self.queue = queue if queue else Queue()",
"async def test_enqueue_new_session_message(self):\n session = _create_test_session(asyncio.get_running_loop())\n\n orig_ctx = get_script_run_ctx()\n ctx = ScriptRunContext(\n session_id=\"TestSessionID\",\n _enqueue=session._enqueue_forward_msg,\n query_string=\"\",\n session_state=MagicMock(),\n uploaded_file_mgr=MagicMock(),\n page_script_hash=\"\",\n user_info={\"email\": \"test@test.com\"},\n )\n add_script_run_ctx(ctx=ctx)\n\n mock_scriptrunner = MagicMock(spec=ScriptRunner)\n session._scriptrunner = mock_scriptrunner\n\n # Send a mock SCRIPT_STARTED event.\n session._on_scriptrunner_event(\n sender=mock_scriptrunner,\n event=ScriptRunnerEvent.SCRIPT_STARTED,\n page_script_hash=\"\",\n )\n\n # Yield to let the AppSession's callbacks run.\n await asyncio.sleep(0)\n\n sent_messages = session._browser_queue._queue\n self.assertEqual(2, len(sent_messages)) # NewApp and SessionState messages\n\n # Note that we're purposefully not very thoroughly testing new_session\n # fields below to avoid getting to the point where we're just\n # duplicating code in tests.\n new_session_msg = sent_messages[0].new_session\n self.assertEqual(\"mock_scriptrun_id\", new_session_msg.script_run_id)\n\n self.assertTrue(new_session_msg.HasField(\"config\"))\n self.assertEqual(\n config.get_option(\"server.allowRunOnSave\"),\n new_session_msg.config.allow_run_on_save,\n )\n\n self.assertTrue(new_session_msg.HasField(\"custom_theme\"))\n self.assertEqual(\"black\", new_session_msg.custom_theme.text_color)\n\n init_msg = new_session_msg.initialize\n self.assertTrue(init_msg.HasField(\"user_info\"))\n\n self.assertEqual(\n list(new_session_msg.app_pages),\n [\n AppPage(page_script_hash=\"hash1\", page_name=\"page1\", icon=\"\"),\n AppPage(page_script_hash=\"hash2\", page_name=\"page2\", icon=\"🎉\"),\n ],\n )\n\n add_script_run_ctx(ctx=orig_ctx)",
"async def prepare(self):\n session_id = self.get_secure_cookie('suid')\n if session_id:\n try:\n session_service = SessionsService(db=self.db)\n session_service_get_res = await session_service.get(\n session_id=session_id.decode('utf-8')\n )\n if session_service_get_res['status_code'] == 200:\n self.session = session_service_get_res['data']\n\n except tornado.gen.TimeoutError as err:\n logger.critical(err, exc_info=True)\n loader = tornado.template.Loader(self.get_template_path())\n data = loader.load('timeout.html').generate(\n static_url=self.static_url)\n\n self.set_status(503)\n self.write(data)\n await self.finish()\n else:\n self.session = None",
"def _start_send_to_queue(self):\n while True:\n message_to_send = str(self.send_message_queue.get())\n if self.verbose: print \"Sending\", message_to_send\n send_msg(self.TCPSock, message_to_send)\n # self.TCPSock.send(message_to_send)",
"def __init__(self):\n self.push_queue = []\n self.pop_queue = []",
"def request_session(self):\n if not hasattr(self, \"_request_session\"):\n rqsid = self.shared_vars.pop(\"rqsid\", \"\")\n rqses = self.request_session_manager.pop_request_session(rqsid)\n\n if not rqses:\n if self.is_action():\n del session['VDOM_API_SESSIONS']\n raise RequestSessionDoesntExist\n\n rqses = self.request_session_manager.create_request_session()\n\n else:\n uuid = rqses[\"rqsid_uuid\"]\n if not self.verify_request_session_key(rqsid, uuid):\n del session['VDOM_API_SESSIONS']\n raise RequestSessionInvalidKey\n\n self._request_session = rqses\n\n return self._request_session",
"def BeginSession( self ) : \r\n\r\n ## self._connection.write( 'Q%s' % ( systemSpec, ) ) \r\n ## assert self.GetServerResponse() == True # \" the quick-&-dirty way \" // to-do: create an own exception \r\n\r\n message = self._fmt.pack( 'Q', self._system_spec )\r\n self._socket.write( message ) \r\n\r\n # debug\r\n print \"BS: \", message \r\n\r\n return self.GetServerResponse()",
"def __init__(self):\n self._data_queue = []\n self._access_queue_lock = Lock()",
"def __init__(self, session):\n self._session = session",
"def reset_queue(self, db_session):\n for player in self.player_queue.queue:\n self.command_queue.appendleft(('_delete_last_row', {}))\n self.player_queue = PlayerQueue.PlayerQueue()\n db_session.execute(sqlalchemy.update(db.User.__table__, values={db.User.__table__.c.times_played: 0}))\n self._add_to_chat_queue('The queue has been emptied and all players start fresh.')",
"def pytest_sessionstart(self, session):\n self.nodemanager = NodeManager(self.config)\n nodes = self.nodemanager.setup_nodes(putevent=self.queue.put)\n self._active_nodes.update(nodes)\n self._session = session",
"def __init__(self):\n self.shared_counter = 0\n self.queue = list()\n self.queue_size = 0",
"def session(self):",
"def _setup_tubes(self):\n chan = self.channel\n inp = self.config[self.MODULE_NAME]['amqp']['in']\n out = self.config[self.MODULE_NAME]['amqp']['out']\n if inp['exchange']:\n log.info('generating Input Queue'+ str(inp))\n chan.exchange_declare(**inp)\n self.qname = chan.queue_declare(exclusive=True).queue\n chan.queue_bind(exchange=inp['exchange'],queue=self.qname)\n self.consume = lambda cb : chan.basic_consume(cb,queue=self.qname,no_ack=True)\n self.start_loop = lambda : pika.asyncore_loop()\n\n if out['exchange']:\n log.info('generating Output Exchange'+ str(out))\n chan.exchange_declare(**out)\n self.publish = lambda msg: self.channel.basic_publish(exchange=out['exchange'],routing_key='',body=msg)",
"def __post_init__(self):\n self._session = Session()\n self._post_hooks()",
"def session_preparation(self):\n self.ansi_escape_codes = True\n self._test_channel_read()\n self.set_base_prompt()\n self.set_terminal_width(command=\"terminal width 511\", pattern=\"terminal\")\n self.disable_paging()\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()",
"def sync(self):\n for subscription in self.getSubscriptionList():\n #user_id = subscription.getZopeUser()\n #uf = self.getPortalObject().acl_users\n #user = uf.getUserById(user_id).__of__(uf)\n #newSecurityManager(None, user)\n subscription.activate(activity='SQLQueue',\n tag=subscription.getId(),\n priority=ACTIVITY_PRIORITY\n ).SubSync(subscription.getPath())",
"def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()",
"def __init__(self, parallel_server):\n self.queue = deque()\n self.parallel_server = parallel_server",
"def setup_queue(self, method_frame):\n logger.info('Declaring queue %s', self.queue_name)\n # self._channel.queue_declare(self.on_queue_declareok, queue_name)\n\n self._channel.queue_declare(self.on_queue_declareok, exclusive=False, durable=True, queue=self.queue_name)",
"def __init__(self):\n self.queue = []\n self.queue.append(Queue())\n self.queue.append(Queue())\n self.tag = 0 # using to record which queue contain the data",
"def __init__(self, out_queue):\n logging.Handler.__init__(self)\n self.oqueue = out_queue\n self.session = None",
"def on_session_started(session_started_request, session):\n \n #session.attributes['result_number'] = 1\n session['attributes'] = {}\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])",
"def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId'] + \", sessionId=\" + session['sessionId'])\n \n session['attributes'] = {\"currentQuestion\":0, \"score\":0, \"date\":datetime.datetime.now().strftime(\"%B-%d-%Y %I:%M%p\"), \"billNo\":\"\", \"age\":\"\", \"result\":[]}",
"def __init__(self):\n self._queue = defaultdict(\n OrderedDict\n ) # type: Dict[OrderedDict[TelnetCommand, ExpectedResponse]]",
"def prepare_queue_after_restart(options):\n if TEST_MODE:\n global task_number\n try:\n task_number\n except NameError:\n task_number = -1\n task_number += 1\n fake_class = SQS_Queue(options['queue']['name'])\n return options['task_data'], fake_class\n # Connection to SQS\n queue = SQS_Queue(\n name=options['queue']['queue_name'],\n region=options['queue']['conn_region']\n )\n # Create a new message\n queue.currentM = queue.q.message_class()\n # Fill message\n queue.currentM.body = options['queue']['body']\n queue.currentM.attributes = options['queue']['attributes']\n queue.currentM.md5_message_attributes = \\\n options['queue']['md5_message_attributes']\n queue.currentM.message_attributes = options['queue']['message_attributes']\n queue.currentM.receipt_handle = options['queue']['receipt_handle']\n queue.currentM.id = options['queue']['id']\n queue.currentM.md5 = options['queue']['md5']\n return options['task_data'], queue",
"def populatereadyqueue():\n readyQueue.put(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyQueue.put(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyQueue.put(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))",
"def __init__(self, *args, **kwargs):\n self.session = requests.Session()\n access_token = get_process_execution_user_token()\n self.session.headers[\"authorization\"] = f\"Bearer {access_token}\"\n self.session.headers[\"content-type\"] = \"application/json\"",
"def use_mandatory_session_management(self):\n # Session state will be saved and can not be closed by consumers\n self._session_management = MANDATORY",
"def connection_made(self, transport):\n super().connection_made(transport)\n\n try:\n self.session = self.server.create_session(transport)\n except AllocationError:\n # An ID could not be allocated for a new session; refuse\n # connection.\n self.logger.warning('Failed to allocate an ID for a new session!')\n self.logger.warning('Refusing connection.')\n transport.close()\n else:\n self.session.on_connected()",
"def on_start(self, session):\n pass",
"def __init__(self):\n\n self.lastcid=0\n self.calls = { }\n\n SessionList.__init__(self)",
"def on_session_started():\n #print(\"on_session_started\")",
"def on_session_started():\n #print(\"on_session_started\")",
"def __init__(self):\n\n self._session = requests.Session()",
"def connection_made(self, transport):\n super().connection_made(transport)\n\n self.session = self.client.create_session(transport)\n self.session.on_connected()",
"def on_session_started(session_started_request, session):\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n # any initialization logic goes here",
"def __init__(self):\n self.queue = deque()",
"def __init__(self):\n self.queue = deque()",
"def __init__(self):\n self.queue = deque()",
"def __init__(self):\n self.queue = deque()",
"def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()",
"async def declare(self) -> 'Queue':\n # we are relying to this in other functions\n self._channel = await self._backend.channel()\n self.log.debug(\"Channel acquired CHANNEL%i\",\n self._channel.channel_number)\n\n if self.exchange:\n await self.declare_exchange()\n\n if self.name is not None:\n await self.declare_queue()\n\n if self.exchange:\n await self.bind_queue()\n\n return self",
"def add_a_queue(self, size):\n \tself.queues.append(ContextModalityQueue(size))",
"def _process_chat_queue(self, chat_queue):\n while self.allowed_to_chat:\n if len(chat_queue) > 0:\n self.ts.send_message(chat_queue.pop())\n time.sleep(.5)",
"def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass",
"def new_queue(self, params, maxUsers=1):\n self.spotify.reset_queue()\n\n #Extract parameters\n mood = params[0]\n users = []\n for i in range(maxUsers):\n if len(params[i + 1]) > 0:\n users.append(params[i + 1])\n\n #Add default host if only one guest is present\n # if len(users) == 1:\n # users.append('Paolo')\n\n #Lists to load\n names = []\n for n in self.listIDs.keys():\n for u in users:\n if len(mood) > 0:\n if u + ':' + mood in n:\n names.append(n)\n else:\n if 'top:' + u in n:\n names.append(n)\n\n\n self.add_playlist(names)",
"def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass",
"def declare(self):\n self.channel.queue_declare(queue='files_to_database')",
"def handle(self):\n global log_th\n sent = 1\n msg_body = ''\n get_recv = True\n get_data = True\n empty_check = 0\n # Looping session requests\n while 1:\n try:\n # If enabled sleep feauture\n if self.sleep_between != 0:\n time.sleep(self.sleep_between)\n # If no answer feauture\n if self.no_answer != 0:\n time.sleep(1)\n continue\n # Changing receive size if receiving data part\n if sent == 3 or sent == 4:\n data = self.request.recv(self.data_recv_size)\n else:\n data = self.request.recv(self.std_recv_size)\n if sent != 5:\n self.command_w_th_inc.write_commands(\n data=bytes(data).decode().encode('ascii', 'ignore')\n .decode().rstrip(), qid=self.message_id)\n # To many empty line received, closed thread\n if self.func_empty_check(data):\n if empty_check >= 3:\n break\n else:\n empty_check += 1\n continue\n # Logging session requests if steps not equal to data section\n if sent != 5:\n log_th.log_info('{} - {} client executed : \"{}\"'.format(\n self.message_id, self.client_ip, bytes(data).decode().rstrip()))\n # Break the loop\n if self.func_quit(data):\n break\n except Exception as ae:\n log_th.log_warning('{} encounter an error from {} thread : {}'.format(\n self.client_ip, threading.current_thread().name, str(ae)))\n break\n else:\n try:\n # Checking the all steps\n if self.func_rset(data):\n sent = 2\n continue\n if self.func_auth(data):\n continue\n if self.func_auth_plain(data):\n continue\n if self.func_starttls(data):\n continue\n # Starting the sent steps\n # Ehlo/hello\n if sent == 1:\n if self.func_ehlo(data) or self.func_helo(data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n # Mail from, rcpt to, data\n elif sent == 2:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 2:\n get_data = False\n get_recv = False\n elif bytes(data).decode().encode('ascii',\n 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_recv = False\n if self.func_from(data, get_recv):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n if not get_recv:\n if self.func_to(data, get_recv, get_data):\n sent += 1\n get_recv = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # rcpt to and data\n elif sent == 3:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_data = False\n if self.func_to(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # data\n elif sent == 4:\n if self.func_to(data, get_recv, get_data):\n continue\n if self.func_data(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # content writing to file (if enabled) and quit statement\n elif sent == 5:\n data_list = bytes(data).decode().split('\\r\\n')\n for line in data_list:\n if str(line) == '.':\n if self.mail_save_enable != 0:\n out_file = open(self.mail_save_path + '/'\n + self.message_id + '.eml', 'w')\n out_file.write(msg_body)\n out_file.close()\n 
self.func_data_ok()\n sent = 1\n break\n else:\n msg_body += str(line) + '\\r\\n'\n except IndexError:\n if sent == 2:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n elif sent == 3:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))",
"def __init__(self):\n self.data = Queue()",
"def __init__(self):\n Queue.__init__(self)",
"def init_queues(self):\n for step in self.get_children():\n if step.kind == StepKinds.queue:\n step.init_object(self.context, None)"
] |
[
"0.62144345",
"0.613409",
"0.613409",
"0.59782755",
"0.5936927",
"0.593133",
"0.59233063",
"0.58450776",
"0.58450776",
"0.582604",
"0.58232695",
"0.5806154",
"0.5799438",
"0.5799438",
"0.5799438",
"0.5795595",
"0.57882345",
"0.57882345",
"0.57882345",
"0.57882345",
"0.57882345",
"0.57646435",
"0.5731056",
"0.56998295",
"0.56925356",
"0.5692182",
"0.5686441",
"0.56817925",
"0.56409425",
"0.5636603",
"0.5621356",
"0.5574592",
"0.55567837",
"0.5545513",
"0.553351",
"0.55310106",
"0.5506894",
"0.5496458",
"0.54915017",
"0.5481453",
"0.5470425",
"0.54636997",
"0.5462943",
"0.5438951",
"0.54292476",
"0.5426905",
"0.5425022",
"0.54212844",
"0.54185015",
"0.54171866",
"0.5416266",
"0.5411931",
"0.53939927",
"0.5392834",
"0.5392295",
"0.5386302",
"0.5385775",
"0.53748614",
"0.5372704",
"0.5352475",
"0.53459567",
"0.5342348",
"0.53414416",
"0.53277284",
"0.5317555",
"0.5305681",
"0.5270549",
"0.52610815",
"0.525571",
"0.52536774",
"0.5228723",
"0.5224345",
"0.522404",
"0.5223894",
"0.52231574",
"0.5209557",
"0.5206618",
"0.5194404",
"0.5193868",
"0.51567984",
"0.51567984",
"0.5155872",
"0.5154003",
"0.5142693",
"0.51335084",
"0.51335084",
"0.51335084",
"0.51335084",
"0.5121411",
"0.5119917",
"0.51191694",
"0.51184267",
"0.5114804",
"0.51141596",
"0.5112171",
"0.51013935",
"0.51005447",
"0.50988865",
"0.50957966",
"0.5087983"
] |
0.59243166
|
6
|
Parses a given protocol version string to check whether it is properly formatted
|
def parse_protocol_version(self, version_string_list):
    # Verify for every provided string if it is in proper versioning format
    for version_string in version_string_list:
        try:
            parsed_version_string = version_string.split('.')
            if len(parsed_version_string) == 1 and version_string.isdigit():
                # No dots in version string, it is a simple integer.
                continue
            StrictVersion(version_string)
        except (AttributeError, ValueError):
            LOG.debug('Invalid protocol version string provided')
            return version_string

        # Check for malformatting
        for i in range(len(parsed_version_string)):
            if len(parsed_version_string[i]) > 1:
                if parsed_version_string[i][0] == '0':  # Leading 0's
                    return version_string
            if len(parsed_version_string[i]) < 1:  # Empty strings
                return version_string

    # Protocol version formating: OK
    return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _verify_format(s, format):\n r = re.compile(format)\n if r.match(s) is not None:\n return True\n return False",
"def proto_check(proto):\n # Check for TCP\n if proto == 6:\n return 'tcp'\n # Check for UDP\n elif proto == 17:\n return 'udp'\n else:\n return None",
"def is_compatible(address, protocol=1):\n bracketed = [word.strip('[]') for word\n in re.findall('\\[[^\\]]*\\]', address)]\n not_bracketed = re.split('\\[[^\\]]*?\\]', address)\n if protocol == 1:\n if has_reflection(bracketed):\n return False\n return has_reflection(not_bracketed)\n elif protocol == 2:\n patterns = extract_protocol_patterns(bracketed)\n for pattern in patterns:\n inverse_pattern = pattern[1] + pattern[0] + pattern[1]\n if any([inverse_pattern in word for word in not_bracketed]):\n return True\n return False\n else:\n raise ValueError('unknown protocol')",
"def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False",
"def _is_valid_fmt(self, fmt):\n # make sure there is no leading or trailing whitespace\n fmt = fmt.strip()\n \n if fmt[0] != '%':\n return False\n \n # Handle business calendars first.\n # This does not check the calendar name.\n if fmt[1:3] == \"tb\" or fmt[1:4] == \"-tb\":\n return True if TB_FMT_RE.match(fmt) else False\n \n # date formats\n if fmt[1] == 't' or fmt[1:3] == '-t':\n return True if TIME_FMT_RE.match(fmt) else False\n \n # categorize using last character\n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width > 2045: return False\n return True\n elif last_char == 'H' or last_char == 'L': # binary\n # Valid binary formats are ^%(8|16)(H|L)$. Stata doesn't raise \n # error with -8 or -16, but the results are perhaps unexpected.\n return True if fmt[1:-1] in ('8', '16', '-8', '-16') else False\n elif last_char == 'x': # hexadecimal\n return True if fmt == '%21x' or fmt == '%-12x' else False\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width <= int(m.group(5)) or width > 2045: \n return False\n return True\n \n return False",
"def _is_valid_fmt(self, fmt):\n # make sure there is no leading or trailing whitespace\n fmt = fmt.strip()\n \n if fmt[0] != '%':\n return False\n \n # Handle business calendars first.\n # This does not check the calendar name.\n if fmt[1:3] == \"tb\" or fmt[1:4] == \"-tb\":\n return True if TB_FMT_RE.match(fmt) else False\n \n # date formats\n if fmt[1] == 't' or fmt[1:3] == '-t':\n return True if TIME_FMT_RE.match(fmt) else False\n \n # categorize using last character\n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width > 244: return False\n return True\n elif last_char == 'H' or last_char == 'L': # binary\n # Valid binary formats are ^%(8|16)(H|L)$. Stata doesn't raise \n # error with -8 or -16, but the results are perhaps unexpected.\n return True if fmt[1:-1] in ('8', '16', '-8', '-16') else False\n elif last_char == 'x': # hexadecimal\n return True if fmt == '%21x' or fmt == '%-12x' else False\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n if not m: return False\n width = int(m.group(3))\n if width == 0 or width <= int(m.group(5)) or width > 244: \n return False\n return True\n \n return False",
"def is_valid_ssdp_packet(data: bytes) -> bool:\n return (\n bool(data)\n and b\"\\n\" in data\n and (\n data.startswith(b\"NOTIFY * HTTP/1.1\")\n or data.startswith(b\"M-SEARCH * HTTP/1.1\")\n or data.startswith(b\"HTTP/1.1 200 OK\")\n )\n )",
"def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError",
"def _is_format_endpoint(pattern):\n return '?P<format>' in pattern._regex",
"def valid_format(s):\n if len(s) > 7:\n return False\n elif '0' in s and len(s) == 1:\n return True\n elif s[0] == '0' and len(s) > 1:\n return False\n elif introcs.isalpha(s):\n return False\n elif (len(s) > 3) and (introcs.count_str(s, ',') == 0):\n return False\n elif introcs.count_str(s, ',') == 0:\n return introcs.isdecimal(s)\n elif introcs.count_str(s, ',') > 1:\n return False\n elif ',' in s and introcs.count_str(s,',') == 1:\n comma_check = s[introcs.find_str(s, ',')+1:]\n before_comma_check = s[:introcs.find_str(s, ',')]\n introcs.isdecimal(before_comma_check)\n return (True if len(comma_check) == 3 else False) and introcs.isdecimal(before_comma_check)",
"def parse_address(address, sanity=True):\n address = address.split(':')\n address, port = ':'.join(address[:-1]), address[-1]\n\n guessed_type = 4\n if address.startswith('['):\n address = address[1:]\n guessed_type = 6\n if address.endswith(']') or (sanity and guessed_type == 6):\n if sanity:\n assert address.endswith(']')\n address = address[:-1]\n guessed_type = 6\n if address.count(':') > 3:\n if sanity:\n assert guessed_type == 6\n guessed_type = 6\n\n return address, int(port), guessed_type",
"def validate(net_string):\n\n host, port = net_string.split(':')\n validate_host(host)\n validate_port(port)\n return (host, port)",
"def tfcProtocol(contactString):\n args = urlsplit(contactString)[3]\n value = args.replace(\"protocol=\", '')\n return value",
"def __detect_type__(self, value):\n def is_ipv6_address(value):\n try:\n value, interface = value.split('%', 1)\n except: # noqa\n pass\n try:\n parts = value.split(':')\n for part in parts:\n if part == '':\n continue\n part = int(part, 16)\n if part < 0:\n raise ValueError\n return True\n except Exception:\n return False\n\n def is_ipv4_address(value):\n try:\n value, interface = value.split('%', 1)\n except: # noqa\n pass\n try:\n parts = value.split('.', 3)\n for part in parts:\n part = int(part)\n if part < 0 or part > 255:\n raise ValueError\n return True\n except: # noqa\n return False\n\n # Strip port\n if value.startswith('['):\n value = value[1:]\n try:\n value, port = value.split(':', 1)\n except: # noqa\n pass\n\n if value.endswith(']'):\n value = value[:-1]\n\n if is_ipv4_address(value):\n return 1, value, 'ipv4_address'\n\n elif is_ipv6_address(value):\n return 2, value, 'ipv6_address'\n\n else:\n return 0, value, 'hostname'",
"def __validate_conn_pattern(conns:str)->str:\n pattern1 = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n # pattern2 = re.compile(r'^\\w+:\\w+@\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:\\d{1,5}$')\n\n for conn in conns.split(\",\"):\n if not pattern1.match(conn) and not pattern2.match(conn):\n raise argparse.ArgumentTypeError(f'Invalid connection format: {conn}. Supported formats: 127.0.0.1:32049 or user:passwd@127.0.0.1:32049')\n\n return conns",
"def check_error_protocol_exists(self):\n p = self.test_proto.parse()\n if p.messages is not None:\n for k, m in p.messages.items():\n self.assertIsNotNone(m.errors, f\"Message {k} did not have the expected implicit string error protocol.\")",
"def parse_protocol_header(stream: BytesIO) -> Tuple[int, int, int]:\n prefix, *version = unpack('>5sBBB', _read(stream, 8))\n if prefix != b'AMQP\\x00':\n raise ValueError(\"wrong protocol, expected b'AMQP\\x00', got {}\".format(\n prefix\n ))\n return version",
"def protocol_match(cls, script_bytes: bytes) -> bool:\n # fast test -- most ScriptOutputs that aren't SLP will fail here quickly\n if not script_bytes.startswith(cls._protocol_prefix):\n return False\n # fast test passed -- next try the slow test -- attempt to parse and\n # validate OP_RETURN message\n try:\n # raises on parse error\n slf = cls(script_bytes)\n # should always be not None\n if slf.message is not None:\n # save parsed message since likely it will be needed again very soon\n # by class c'tor\n cls._script_message_cache.put(slf.script, slf.message)\n return True\n except Error:\n pass\n except Exception:\n # DEBUG XXX FIXME\n import sys\n import traceback\n\n traceback.print_exc(file=sys.stderr)\n pass\n return False",
"def test_is_valid_manifest_format_using_allowed_protocols(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_custom_url_protocols.tsv\",\n allowed_protocols=[\"s3\", \"gs\", \"http\", \"https\"],\n )\n error_log = caplog.text\n assert \"gs://test/test.txt\" not in error_log\n assert \"s3://testaws/aws/test.txt\" not in error_log\n assert \"https://www.uchicago.edu/about\" not in error_log\n assert \"http://en.wikipedia.org/wiki/University_of_Chicago\" not in error_log\n\n assert '\"s3://bucket_without_path\"' in error_log\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert result == False",
"def checkurlsbasic(urlstxt, protocolstxt, mdtype):\n # urlstxt consists of a list of urls, split by ;\n # for each url, check if it has been resolvable by inspecting the HTTP\n # code added to the URL\n score = 2\n if len(urlstxt) > 3: # TODO: take into account that starts with \" (HTTP \" as well, so longer url is needed\n # start with an empty score if there are urls\n score = 0\n urls = urlstxt.split(valuesep) # TODO: make configurable?\n errors = 0\n nrurls = 0\n for u in urls:\n if u != None:\n try:\n # u starts with \" (HTTP 0)\", so must be longer than\n if len(u) > 2 and u != \" (HTTP 0)\":\n # TODO: assume http 2xx, 3xx and 5xx-series (all!) are the only okay HTTP codes?\n # Or use others as well? Or just the start mubers, so 204 is also included\n # for 400-series: HTTP 400 should be okay, since this means that the client sent a wrong request (but the service still exists / works there, e.g. TMS)\n # TODO: use a list of HTTP codes that are okay.\n # Configurable\n nrurls = nrurls + 1\n if u.find(\"(HTTP 2\") == -1 and u.find(\"(HTTP 3\") == -1 and u.find(\"(HTTP 5\") == -1 and u.find(\"(HTTP 400\") == -1 and u.find(\"(HTTP 403\") == -1 and u.find(\"(HTTP intern)\") == -1:\n errors = errors + 1\n except Exception as e:\n logging.info(\"Checking URLs failed for: \" + u)\n logging.debug(str(e))\n errors = errors + 1\n if mdtype == \"dataset\" or mdtype == \"series\":\n if errors == 0:\n score = 2\n # if we don't have urls, but the service type contains OGC: .. , download or website, then we have an error\n # TODO: document\n if (protocolstxt.find(\"OGC:\") > -1 or protocolstxt.find(\"download\") > -1 or protocolstxt.find(\"website\") > -1) and nrurls == 0:\n score = 0\n # checkid = 6, so the index in the matrix is: 5\n result = checksdatasets[5][2][score]\n else:\n # there must be a URL as well, so check this\n if errors > 0 or nrurls == 0:\n score = 0\n else:\n score = 2\n result = checksservices[5][2][score]\n return MkmScore(urlstxt, score, result)",
"def parse_valid(self):\n try:\n self.test_proto.parse()\n except avro.errors.ProtocolParseException: # pragma: no coverage\n self.fail(f\"Valid protocol failed to parse: {self.test_proto!s}\")",
"def _parse(self, string):\n modern_scheme = r\"\"\"\nssh://\n(?:\n (?P<user>[^@]+)\n@)? # user is anything but @, then the @ separator\n(?P<host>[^:/]+) # host is anything but : and /\n(:(?P<port>\\d+))? # optional port\n(/(?P<remote_dir>.*))? # optional remote directory\n\"\"\"\n match = re.match(modern_scheme, string, re.VERBOSE)\n if match:\n self._handle_match(match)\n else:\n old_scheme = \"\"\"\n(?P<user>[^@]+) # user is anything but @, and optional\n@ # mandatory @ separator\n(?P<host>[^:/]+) # host is anything but : and /\n(\n (:|/)? # directory separator is either : or /\n (?P<remote_dir>.*))? # remote directory is optional\n \"\"\"\n match = re.match(old_scheme, string, re.VERBOSE)\n if match:\n self._handle_match(match)\n else:\n raise URLParseError(\"\"\" \\\nCould not parse %s as a valid url.\nSupported schemes are\n\n user@host:directory\n\n ssh://user@host:port/directory\n\"\"\" % self.as_string)",
"def _is_url(string):\n return \"http\" in string",
"def _read_proto_resolve(self, addr: 'bytes', ptype: 'int') -> 'str | IPv4Address | IPv6Address':\n if ptype == Enum_EtherType.Internet_Protocol_version_4: # IPv4\n return ipaddress.ip_address(addr)\n if ptype == Enum_EtherType.Internet_Protocol_version_6: # IPv6\n return ipaddress.ip_address(addr)\n return addr.hex()",
"def is_valid(string: str, format_: str) -> bool:\n try:\n datetime.strptime(string, format_)\n return True\n except ValueError:\n return False",
"def isPfnForProtocol( self, path ):\n if path.startswith( '/' ):\n return S_OK( True )\n else:\n return S_OK( False )",
"def provides_protocol(type_, protocol):\n return issubclass(type_, protocol)",
"def supports_protocol(obj, protocol):\n manager = get_global_adaptation_manager()\n return manager.supports_protocol(obj, protocol)",
"def assert_message_valid(\n protocol: ProtocolMetaData,\n buf: bytes,\n encoded_message: List[Tuple[bytes, bytes]],\n decoded_message: Mapping[str, Any],\n sep: bytes,\n convert_sep_to_soh_for_checksum: bool\n) -> None:\n # Check the begin string.\n begin_string_field = protocol.fields_by_name['BeginString']\n begin_string_value = encode_value(\n protocol,\n begin_string_field,\n decoded_message[begin_string_field.name]\n )\n _assert_field_value_matches(\n begin_string_field,\n protocol.begin_string,\n begin_string_value\n )\n\n # Check the body length.\n body_length_field = protocol.fields_by_name['BodyLength']\n body_length_value = encode_value(\n protocol,\n body_length_field,\n decoded_message[body_length_field.name]\n )\n body_length = calc_body_length(buf, encoded_message, sep)\n _assert_field_value_matches(\n body_length_field,\n body_length_value,\n encode_value(protocol, body_length_field, body_length)\n )\n\n # Check the checksum.\n check_sum_field = protocol.fields_by_name['CheckSum']\n check_sum_value = encode_value(\n protocol,\n check_sum_field,\n decoded_message[check_sum_field.name]\n )\n check_sum = calc_checksum(buf, sep, convert_sep_to_soh_for_checksum)\n _assert_field_value_matches(\n check_sum_field,\n check_sum,\n check_sum_value\n )",
"def is_http(line):\n return line.startswith('http://') or line.startswith('https://')",
"def validate_protocol_type(dictionary, yaml_file):\n\n if not _valid_protocol_type(dictionary['protocol']):\n raise ClowderYAMLError(fmt.invalid_protocol_error(dictionary['protocol'], yaml_file))",
"def validate_uri(value: Any) -> str:\n uri_value = str(value)\n\n if urlparse(uri_value).scheme == \"tcp\":\n # pylint: disable-next=no-value-for-parameter\n return cast(str, vol.Schema(vol.Url())(uri_value))\n\n raise vol.Invalid(\"invalid Wyoming Protocol URI\")",
"def validate_required_protocol(dictionary, yaml_file):\n\n validate_dict_contains_value(dictionary, 'defaults', 'protocol', yaml_file)\n validate_type(dictionary['protocol'], 'protocol', str, 'str', yaml_file)\n validate_protocol_type(dictionary, yaml_file)\n del dictionary['protocol']",
"def buildProtocol(addr):",
"def is_valid_format(format_string): \n # default\n is_valid = True\n \n # list of valid formats\n valid_formats = ['hex', 'char', 'schar','uint', 'int', 'double', \n 'ascii', 'long', 'long long', 'float']\n \n # list of formats provided (may be a single format)\n format_list = format_string.split(', ')\n \n # check each item in the provided list\n for item in format_list:\n \n # if it does not match a valid format then it is invalid\n if item not in valid_formats:\n is_valid = False\n # end if\n # end for\n \n return is_valid",
"def verify(timestamp):\n if not isinstance(timestamp, str):\n raise TypeError('\"{}\" is not str type'.format(type(timestamp)))\n elif match('^[0-9]{1,2}(:[0-9]{1,2}){1,2}(\\.[0-9]{1,9})?$', timestamp):\n return True\n return False",
"def has_compatible_scheme(url):\n return url.startswith(('http://', 'https://'))",
"def validate(self, s):\n if len(s) == 0:\n return False\n if s in self.whitelist:\n return True\n if s in self.blacklist:\n return False\n\n # SQL Types are rarely used\n if 't' in s and 'f(t' not in s and 'At' not in s:\n return False\n\n if '1nf' in s:\n return False\n if 's1o' in s:\n return False\n if 'oo' in s:\n return False\n if 'v,s' in s:\n return False\n if 's,v' in s:\n return False\n if 'v,v' in s:\n return False\n if 'v,1' in s:\n return False\n if 'v,n' in s:\n return False\n if 'n,v' in s:\n return False\n if '1,v' in s:\n return False\n if 'Eo(' in s:\n return False\n if '(o(' in s:\n return False\n if '(o1' in s:\n return False\n if '(on' in s:\n return False\n if '(os' in s:\n return False\n if '(of' in s:\n return False\n if '(ov' in s:\n return False\n if 'B(n)' in s:\n return False\n if 'oso' in s:\n return False\n if 'o1o' in s:\n return False\n if 'ono' in s:\n return False\n\n # only 1 special case for this\n # 1;foo:goto foo\n # 1;n:k\n # the 'foo' can only be a 'n' type\n if ':' in s and not 'n:' in s:\n return False\n\n if '11' in s:\n return False\n\n if '))' in s:\n return False\n if '((' in s:\n return False\n if 'v1' in s:\n return False\n\n if 'nv' in s and ';T' not in s:\n return False\n if 'nn' in s and ';T' not in s:\n return False\n\n # select @version foo is legit\n # but unlikely anywhere else\n if 'vn' in s and 'Evn' not in s:\n return False\n\n if 'oE' in s:\n return False\n\n if 'A1' in s:\n return False\n if 'An' in s:\n return False\n if 'A(1' in s:\n return False\n\n if 'vov' in s:\n return False\n if 'vo1' in s:\n return False\n if 'von' in s:\n return False\n\n if 'ns' in s:\n if 'U' in s:\n return True\n if 'T' in s:\n return True\n return False\n\n if 'sn' in s:\n # that is... Tsn is ok\n if s.find('T') != -1 and s.find('T') < s.find('sn'):\n return True\n return False\n\n # select foo (as) bar is only nn type i know\n if 'nn' in s and 'Enn' not in s and ';T' not in s:\n return False\n\n if ',o' in s:\n return False\n\n if 'kk' in s and 'Tkk' not in s:\n return False\n\n if 'ss' in s:\n return False\n\n if 'ff' in s:\n return False\n\n if '1no' in s:\n return False\n\n if 'kno' in s:\n return False\n\n if 'nEk' in s:\n return False\n\n if 'n(n' in s:\n return False\n if '1so' in s:\n return False\n if '1s1' in s:\n return False\n if 'noo' in s:\n return False\n if 'ooo' in s:\n return False\n\n if 'vvv' in s:\n return False\n\n if '1vn' in s:\n return False\n if '1n1' in s:\n return False\n if '&1n' in s:\n return False\n if '&1v' in s:\n return False\n if '&1s' in s:\n return False\n if 'nnk' in s:\n return False\n if 'n1f' in s:\n return False\n # folded away\n if s.startswith('('):\n return False\n\n if '&o' in s:\n return False\n\n if '1,1' in s:\n return False\n if '1,s' in s:\n return False\n if '1,n' in s:\n return False\n if 's,1' in s:\n return False\n if 's,s' in s:\n return False\n if 's,n' in s:\n return False\n if 'n,1' in s:\n return False\n if 'n,s' in s:\n return False\n if 'n,n' in s:\n return False\n if '1o1' in s:\n return False\n if '1on' in s:\n return False\n if 'no1' in s:\n return False\n if 'non' in s:\n return False\n if '1(v' in s:\n return False\n if '1(n' in s:\n return False\n if '1(s' in s:\n return False\n if '1(1' in s:\n return False\n if 's(s' in s:\n return False\n if 's(n' in s:\n return False\n if 's(1' in s:\n return False\n if 's(v' in s:\n return False\n if 'v(s' in s:\n return False\n if 'v(n' in s:\n return False\n if 'v(1' in s:\n return False\n if 'v(v' in s:\n return False\n\n if 
s.startswith('n('):\n return False\n\n if s.startswith('vs'):\n return False\n\n if s.startswith('o'):\n return False\n\n if ')(' in s:\n return False\n\n # need to investigate T(vv) to see\n # if it's correct\n if 'vv' in s and s != 'T(vv)':\n return False\n\n # unlikely to be sqli but case FP\n if s in ('so1n)', 'sonoE'):\n return False\n\n return True",
"def is_http_url(string: str) -> bool:\n from urllib.parse import urlparse\n\n parsed_url = urlparse(string)\n return parsed_url.scheme in _http_url_schemes",
"def extract_protocol_text(protocol_text):\n if not protocol_text:\n return \"\"\n elif type(protocol_text) == str:\n return protocol_text.strip()\n elif type(protocol_text) == list:\n # These can be {\"br\": None}, so skip non string lines\n return \" \".join([line.strip() for line in protocol_text if type(line) == str])\n else:\n # Not sure what would get us here, but it's not worth raising an error over\n return str(protocol_text)",
"def parse_host(host):\n if not host:\n return None, u''\n if u':' in host:\n try:\n inet_pton(socket.AF_INET6, host)\n except socket.error as se:\n raise URLParseError('invalid IPv6 host: %r (%r)' % (host, se))\n except UnicodeEncodeError:\n pass # TODO: this can't be a real host right?\n else:\n family = socket.AF_INET6\n return family, host\n try:\n inet_pton(socket.AF_INET, host)\n except (socket.error, UnicodeEncodeError):\n family = None # not an IP\n else:\n family = socket.AF_INET\n return family, host",
"def test_normalize_xmlrpc_address_missing_protocol(self):\r\n input_val = 'google.com:1234'\r\n expected_val = 'http://google.com:1234'\r\n actual_val = normalize_xmlrpc_address(input_val, 1471)\r\n self.assertEqual(expected_val, actual_val)",
"def is_ipv4_address(s):\n # split the string on dots\n s_split = s.split('.')\n \n return len(s_split) == 4 and all(num.isdigit() and 0 <= int(num) < 256 for num in s_split)",
"def detect(byte_string):\n\n if not isinstance(byte_string, byte_cls):\n raise TypeError(unwrap(\n '''\n byte_string must be a byte string, not %s\n ''',\n _type_name(byte_string)\n ))\n\n return byte_string.find(b'-----BEGIN') != -1 or byte_string.find(b'---- BEGIN') != -1",
"def check_eth_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True",
"def validate(protocol_specification: ProtocolSpecification) -> Tuple[bool, str]:\n # Validate speech-acts section\n (\n result_speech_acts_validation,\n msg_speech_acts_validation,\n performatives_set,\n custom_types_set,\n ) = _validate_speech_acts_section(protocol_specification)\n if not result_speech_acts_validation:\n return result_speech_acts_validation, msg_speech_acts_validation\n\n # Validate protocol buffer schema code snippets\n result_protobuf_validation, msg_protobuf_validation = _validate_protocol_buffer_schema_code_snippets(protocol_specification, custom_types_set) # type: ignore\n if not result_protobuf_validation:\n return result_protobuf_validation, msg_protobuf_validation\n\n # Validate dialogue section\n result_dialogue_validation, msg_dialogue_validation = _validate_dialogue_section(protocol_specification, performatives_set) # type: ignore\n if not result_dialogue_validation:\n return result_dialogue_validation, msg_dialogue_validation\n\n return True, \"Protocol specification is valid.\"",
"def is_time_in_given_format(time_string, time_format):\n try:\n datetime.strptime(time_string, time_format)\n return True\n except ValueError:\n return False",
"def test_hex_straining():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n # single non-hex message\n r = p([\"12.8 Volts\"])\n assert len(r) == 1\n assert r[0].ecu == ECU.UNKNOWN\n assert len(r[0].frames) == 1\n\n\n # multiple non-hex message\n r = p([\"12.8 Volts\", \"NO DATA\"])\n assert len(r) == 2\n\n for m in r:\n assert m.ecu == ECU.UNKNOWN\n assert len(m.frames) == 1\n\n # mixed hex and non-hex\n r = p([\"NO DATA\", \"48 6B 10 41 00 00 01 02 03 FF\"])\n assert len(r) == 2\n\n # first message should be the valid, parsable hex message\n # NOTE: the parser happens to process the valid one's first\n check_message(r[0], 1, 0x10, [0x41, 0x00, 0x00, 0x01, 0x02, 0x03])\n\n # second message: invalid, non-parsable non-hex\n assert r[1].ecu == ECU.UNKNOWN\n assert len(r[1].frames) == 1\n assert len(r[1].data) == 0 # no data",
"def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True",
"def protocolise(url):\n parsed = urlparse.urlparse(url)\n if not parsed.scheme and not PROTORE.search(url):\n url = 'http://{0}'.format(url)\n return url",
"def is_valid_network(network):\n return re.match(r'^[a-z0-9\\-_]+$', network)",
"def _check_has_ping(data):\r\n return re.match(\r\n r'^PING :tmi\\.twitch\\.tv$', data)",
"def is_url(url):\n if '://' not in url:\n return False\n proto, addr = url.split('://', 1)\n if proto.lower() not in ['tcp','pgm','epgm','ipc','inproc']:\n return False\n return True",
"def validate_http_request(request):\r\n if request != b'':\r\n # Divide the request line: [method, sp, url, version, cr lf]\r\n request = request.decode().split('\\r')[0]\r\n method = request.split()[0]\r\n url = request.split()[1]\r\n version = request.split()[2]\r\n if method == METHOD and version == VERSION:\r\n return True, url\r\n else:\r\n return False, None\r\n else:\r\n return True, None",
"def supports_protocol(self, obj, protocol):\n\n return self.adapt(obj, protocol, None) is not None",
"def validate_url(url):\n if not isinstance(url, basestring):\n raise TypeError(\"url must be a string, not %r\"%type(url))\n url = url.lower()\n \n proto_addr = url.split('://')\n assert len(proto_addr) == 2, 'Invalid url: %r'%url\n proto, addr = proto_addr\n assert proto in ['tcp','pgm','epgm','ipc','inproc'], \"Invalid protocol: %r\"%proto\n \n # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391\n # author: Remi Sabourin\n pat = re.compile(r'^([\\w\\d]([\\w\\d\\-]{0,61}[\\w\\d])?\\.)*[\\w\\d]([\\w\\d\\-]{0,61}[\\w\\d])?$')\n \n if proto == 'tcp':\n lis = addr.split(':')\n assert len(lis) == 2, 'Invalid url: %r'%url\n addr,s_port = lis\n try:\n port = int(s_port)\n except ValueError:\n raise AssertionError(\"Invalid port %r in url: %r\"%(port, url))\n \n assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url\n \n else:\n # only validate tcp urls currently\n pass\n \n return True",
"def _address_type(self, address):\n parsed_type = None\n parsed = urlparse.urlparse(address)\n if parsed.scheme not in ('http', 'https', 'ipc', 'tcp'):\n raise ValueError('Invalid volttron central address.')\n\n return parsed.scheme",
"def unrecognised_format(link):\n print('Message has been identified as a YouTube link, but the format is not recognised.')\n print('Message was {}, support for this format should be added soon.'.format(link))\n pass",
"def validate_typed_connector_id(cid):\n\n m = re.match(r'(smpps|http)\\(([A-Za-z0-9_-]{3,25})\\)', cid, re.I)\n if not m:\n raise InvalidCidSyntax('Invalid syntax for connector id, must be smpps(some_id) or http(some_id).')\n\n return m.group(1).lower(), m.group(2)",
"def isProtocolDefined(self) -> bool:\n ...",
"def check_protobuf_using_protoc(\n path_to_generated_protocol_package: str, name: str\n) -> Tuple[bool, str]:\n try:\n try_run_protoc(path_to_generated_protocol_package, name)\n os.remove(os.path.join(path_to_generated_protocol_package, name + \"_pb2.py\"))\n return True, \"protobuf file is valid\"\n except subprocess.CalledProcessError as e:\n pattern = name + \".proto:[0-9]+:[0-9]+: \"\n error_message = re.sub(pattern, \"\", e.stderr[:-1])\n return False, error_message",
"def load(fmt: str, stream: BytesIO, _unpack=unpack):\n values = []\n bitcount = bits = 0\n for char in fmt:\n if char == '?':\n if not bitcount:\n bits = _unpack('>B', _read(stream, 1))[0]\n bitcount = 8\n value = (bits & 1) == 1\n bits >>= 1\n bitcount -= 1\n else:\n bitcount = bits = 0\n if char == 'B':\n value = _unpack('>B', _read(stream, 1))[0]\n elif char == 'H':\n value = _unpack('>H', _read(stream, 2))[0]\n elif char == 'L':\n value = _unpack('>L', _read(stream, 4))[0]\n elif char == 'Q':\n value = _unpack('>Q', _read(stream, 8))[0]\n elif char == 's':\n length = _unpack('>B', _read(stream, 1))[0]\n value = _unpack('>%ss' % length, _read(stream, length))[0]\n value = value.decode('utf-8', 'surrogatepass')\n elif char == 'S':\n length = _unpack('>L', _read(stream, 4))[0]\n value = _unpack('>%ss' % length, _read(stream, length))[0]\n value = value.decode('utf-8', 'surrogatepass')\n elif char == 't':\n timestamp = _unpack('>Q', _read(stream, 8))[0]\n value = datetime.utcfromtimestamp(timestamp)\n elif char == 'T':\n value = {}\n length = _unpack('>L', _read(stream, 4))[0]\n stream2 = BytesIO(_read(stream, length))\n while stream2.tell() < length:\n key = load('s', stream2)[0]\n value[key] = _load_item(stream2)\n elif char != '?':\n raise ValueError('wrong format char', char)\n values.append(value)\n return values",
"def test_parse_host_port(self):\n # test default port for http\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test default port for https\n endpoint = \"1.2.3.4\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTPS)\n self.assertEqual(host, endpoint)\n self.assertEqual(port, default_protocol.default_port)\n\n # test specific port\n endpoint = \"1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTP\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)\n\n # test value error\n endpoint = \"1.2.3.4:abcd\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # protocol unsupported\n endpoint = \"ftp://1.2.3.4\"\n default_protocol = baidubce.protocol.HTTP\n self.assertRaises(ValueError, utils.parse_host_port, endpoint, default_protocol)\n\n # test of endpoint dominates the protocol\n endpoint = \"http://1.2.3.4:8080\"\n default_protocol = baidubce.protocol.HTTPS\n ret_protocol, host, port = utils.parse_host_port(endpoint, default_protocol)\n self.assertEqual(ret_protocol, baidubce.protocol.HTTP)\n self.assertEqual(host, \"1.2.3.4\")\n self.assertEqual(port, 8080)",
"def test_multi_line():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n test_case = [\n \"48 6B 10 49 02 01 00 01 02 03 FF\",\n \"48 6B 10 49 02 02 04 05 06 07 FF\",\n \"48 6B 10 49 02 03 08 09 0A 0B FF\",\n ]\n\n correct_data = [0x49, 0x02] + list(range(12))\n\n # in-order\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)\n\n # test a few out-of-order cases\n for n in range(4):\n random.shuffle(test_case) # mix up the frame strings\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)",
"def validate_http_request(request):\n request_str = request.decode('utf-8')\n print(request_str)\n split_request = request_str.split(' ')\n if (split_request[0] == 'GET') and split_request[2].startswith('HTTP/1.1'):\n request_url = split_request[1].replace(\"/\", \"\\\\\")\n x = (True, request_url)\n return x\n y = (False, None)\n return y",
"def _parse_status_line(line):\n # Up to the first space is the protocol version.\n index0 = line.index(SPACE)\n http_version = line[: index0]\n # Make sure it's the protocol version we recognize.\n assert http_version == HTTP_VERSION\n # Starting from the first space, up to the next space is the status code.\n index1 = line.index(SPACE, index0 + 1)\n status = line[index0 + 1 : index1]\n # Convert the status code to an integer.\n status = int(status)\n # The remainder is the reason.\n reason = line[index1 + 1 :]\n return status, reason",
"def isIpv4Addr(string):\n return (True)",
"def parseString(self, s):\n pass",
"def _check_line_is_good(self, string):\r\n # The standard requires we only accept strings ending in \\r\\n or \\n\r\n if (string[-1] != \"\\n\"):\r\n raise ParseError('Line endings were not as expected', string)\r\n \r\n # The standard places a limit on line lengths\r\n if (len(string)) > 512:\r\n raise ProtocolError('Line too long to be valid', string)\r\n \r\n # Trim our trailing whitespace/line endings\r\n return string.rstrip()",
"def run_protocol_checks(sub: Submission, logger):\n\n protocols = sub.protocol\n\n codes = []\n names = set()\n p_types = set()\n allowed_types = ontology_term(\"protocol_types\")\n mandatory = [label for label, attrib in allowed_types.items()\n if attrib[\"exp_type\"] == \"all\" and\n (attrib[\"mandatory\"] == \"ma\" or attrib[\"mandatory\"] == \"seq\")]\n exclusive = [label for label, attrib in allowed_types.items()\n if attrib[\"exp_type\"] == \"all\" and\n attrib[\"mandatory\"] == \"one of\"]\n found_exclusive = False\n\n if not protocols:\n logger.error(\"Experiment has no protocols. At least one expected.\")\n codes.append(\"PROT-E01\")\n return codes\n for p in protocols:\n if p.alias:\n # Protocol names should be unique.\n if p.alias in names:\n logger.error(\"Protocol name \\\"{}\\\" is not unique.\".format(p.alias))\n codes.append(\"PROT-E04\")\n names.add(p.alias)\n # Protocol must have a name\n else:\n logger.error(\"Protocol found with no name. Not checking it further.\")\n codes.append(\"PROT-E02\")\n continue\n if p.description:\n # Protocol description should be longer than 50 characters\n if len(p.description) < 50:\n logger.warning(\"Protocol \\\"{}\\\" is shorter than 50 characters.\".format(p.alias))\n codes.append(\"PROT-W01\")\n # Protocol must have description\n else:\n logger.error(\"Protocol \\\"{}\\\" has no description.\".format(p.alias))\n codes.append(\"PROT-E03\")\n if p.protocol_type:\n # Protocol type must be from controlled vocabulary (EFO)\n p_types.add(p.protocol_type.value)\n if p.protocol_type.value not in allowed_types:\n logger.error(\"Protocol \\\"{}\\\" has a type that is not from controlled vocabulary/EFO: \"\n \"\\\"{}\\\"\".format(p.alias, p.protocol_type.value))\n codes.append(\"PROT-E05\")\n if p.protocol_type.value in exclusive:\n found_exclusive = True\n else:\n # Protocol must have a protocol type\n logger.warn(\"Protocol \\\"{}\\\" has no protocol type.\".format(p.alias))\n codes.append(\"PROT-E07\")\n\n # Mandatory protocol types (for all experiment types) must be present\n for p_type in mandatory:\n if p_type not in p_types:\n logger.error(\"A {} must be included.\".format(p_type))\n codes.append(\"PROT-E06\")\n\n # Every experiment must have at least one growth/treatment/sample collection protocol\n if not found_exclusive:\n logger.error(\"A growth, treatment or sample collection protocol must be included.\")\n codes.append(\"PROT-E07\")\n\n return codes",
"def checkDataType(self,str):\n accepted_vals = [\"HEXA\",\"NEHU\",\"NEDS\",\"NEDU\",\"NDHU\",\"NDDU\"]\n assert str in accepted_vals, \"Error: Data Type not accepted: \" + str\n if (str == 'HEXA') | (str[2] == 'H'):\n self.base = 16\n if str[3] == 'S':\n self.signed = True",
"def test_unicodeAndBytes(self):\n self.assertTrue(isIPv6Address(b\"fe80::2%1\"))\n self.assertTrue(isIPv6Address(u\"fe80::2%1\"))\n self.assertFalse(isIPv6Address(u\"\\u4321\"))\n self.assertFalse(isIPv6Address(u\"hello%eth0\"))\n self.assertFalse(isIPv6Address(b\"hello%eth0\"))",
"def validate_format(self):\n raise NotImplementedError()",
"def test_protocols(container, protocol):\n assert isinstance(container, protocol)",
"def validate_and_parse_input(time: str):\n if time is None or not re.match(r'^\\d{1,2}:\\d{1,2}$', time):\n return False\n hour, minute = map(int, time.split(r':'))\n if type(hour) != int or type(minute) != int:\n return False\n\n if 0 <= hour < 24 and 0 <= minute < 60:\n hour = hour % 12\n minute = minute\n return hour, minute\n else:\n return False",
"def parse_handshake(self, data):\n\n if (data[0] != len(PSTR) or data[1:20] != PSTR\n or data[28:48] != self.factory.torrent.info_hash):\n\n self.transport.loseConnection()\n else:\n self.handshaked = True\n\n reserved = data[20:28]\n if reserved[7] & ord('\\x04'):\n self.fast_extension = True\n\n if reserved[7] & ord('\\x01'):\n self.dht = True",
"def isBinaryFormat(content, maxline=20):\n for lc in content[:maxline]:\n if b'format' in lc:\n if b'binary' in lc:\n return True\n return False\n return False",
"def _parse_inet(line):\n tokens = line.split()\n return netaddr.IPNetwork(tokens[1])",
"def ch_ipv4(sp):\n octet = sp.split('.')\n if len(octet) != 4:\n return False\n try:\n return all(0<=int(p)<256 for p in octet)\n except ValueError:\n return False",
"def text_parse(text):\n is_connected = False\n times = 0\n\n text = clean_text(text)\n\n \"\"\"\n Since the response follow that, if yes, \"Y<whatever>3\"; if no, \"N\"\n we just need find the digit in the text\n\n if the text does contain digit, it must be no;\n if contains and great than 0, it must be yes.\n \"\"\"\n match = re.search(r\"\\d+\", text)\n if match:\n times = int(match.group())\n if times > 0:\n is_connected = True\n\n elif \"once\" in text:\n is_connected = True\n times = 1\n\n return times, is_connected",
"def _check_ip(val: Any, input_format: str, clean: bool) -> Any:\n try:\n if val in NULL_VALUES:\n return (None, \"null\") if clean else False\n\n address = ip_address(val)\n vers = address.version\n\n if vers == 4 and input_format != \"ipv6\" or vers == 6 and input_format != \"ipv4\":\n return (address, \"success\") if clean else True\n return (None, \"unknown\") if clean else False\n\n except (TypeError, ValueError):\n return (None, \"unknown\") if clean else False",
"def _parse(self, line):\n comd, value = cmd.parse(line, CLIENT_PREFIX)\n if comd == 'reg':\n self.peername = value\n elif comd == 'eof':\n self._reset()\n msg = cmd.clientcmd(comd, value)\n msg = cmd.addFirst(msg, self.peername)\n self.sendString(msg)\n elif comd == 'fail':\n self._reset()\n else:\n return False\n return True",
"def isLinkIdFormatValid(link_id):\n if linkable.LINK_ID_REGEX.match(link_id):\n return True\n return False",
"def isUrlScheme(urlScheme):\n if not urlScheme:\n return False\n \n #an urlscheme can be anything that starts with alfanumeric charaters followed by a colon. \n pattern = re.compile('^[a-zA-Z][+a-zA-Z0-9.-]*:$')\n return bool(pattern.search(str(urlScheme)))",
"def provides_protocol(type_, protocol):\n return AdaptationManager.provides_protocol(type_, protocol)",
"def is_binary_format(content, maxline=20):\n for lc in content[:maxline]:\n if b'format' in lc:\n if b'binary' in lc:\n return True\n return False\n return False",
"def check_packet_formedness(packet):\n if len(packet) != TS.PACKET_SIZE:\n raise Exception(\"Provided input packet string not of correct size\")\n\n if packet[0] != TS.SYNC_BYTE:\n raise Exception(\"Provided input packet does not begin with correct sync byte.\")",
"def validate_optional_protocol(dictionary, yaml_file):\n\n if 'protocol' in dictionary:\n validate_type(dictionary['protocol'], 'protocol', str, 'protocol', yaml_file)\n validate_protocol_type(dictionary, yaml_file)\n del dictionary['protocol']",
"def test_ipv6_validation_failure():\n with pytest.raises(socket.error):\n is_ipv6('2001::0234:C1ab::A0:aabc:003F')",
"def determine_supported_protocol(self, earliest, latest):\n earliest = int(earliest.split('.')[0])\n latest = int(latest.split('.')[0])\n if earliest <= latest:\n supported = range(earliest, latest + 1)\n for version in (reversed(supported)):\n if version in RPCS.SUPPORTED_PROTOCOL_VERSIONS:\n return str(version)\n\n # If no common protocol version is found, raise fatal error\n raise ClientRequestError('NoValidProtocolVersionInCommon')",
"def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"",
"def parse_string(self, data):\n pass",
"def __find_protocol(self, url):\n match = self.__REGEX_SCHEMA.search(url)\n if match:\n protocol = match.group(0).split(':')[0]\n return protocol\n return None",
"def validate_subprotocol(subprotocol, hixie):\n\n if not subprotocol:\n raise HandshakeException('Invalid subprotocol name: empty')\n if hixie:\n # Parameter should be in the range U+0020 to U+007E.\n for c in subprotocol:\n if not 0x20 <= ord(c) <= 0x7e:\n raise HandshakeException(\n 'Illegal character in subprotocol name: %r' % c)\n else:\n # Parameter should be encoded HTTP token.\n state = http_header_util.ParsingState(subprotocol)\n token = http_header_util.consume_token(state)\n rest = http_header_util.peek(state)\n # If |rest| is not None, |subprotocol| is not one token or invalid. If\n # |rest| is None, |token| must not be None because |subprotocol| is\n # concatenation of |token| and |rest| and is not None.\n if rest is not None:\n raise HandshakeException('Invalid non-token string in subprotocol '\n 'name: %r' % rest)",
"def url_checker(url):\n if url.startswith(http_req):\n url_name = url[7:]\n # print('URL check passed. Using http')\n return url_name\n if url.startswith(https_req):\n url_name = url[8:]\n # print('URL check passed. Using https')\n return url_name\n else:\n print('URL check failed. not valid http or https URL')\n print(f'Bad URL:{url}')\n sys.exit()\n # return False",
"def check_protocol_version(self):\n try:\n protocol_version = self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))",
"def is_valid_ipv6_address(ip_str):\r\n # We need to have at least one ':'.\r\n if ':' not in ip_str:\r\n return False\r\n\r\n # We can only have one '::' shortener.\r\n if ip_str.count('::') > 1:\r\n return False\r\n\r\n # '::' should be encompassed by start, digits or end.\r\n if ':::' in ip_str:\r\n return False\r\n\r\n # A single colon can neither start nor end an address.\r\n if ((ip_str.startswith(':') and not ip_str.startswith('::')) or\r\n (ip_str.endswith(':') and not ip_str.endswith('::'))):\r\n return False\r\n\r\n # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid)\r\n if ip_str.count(':') > 7:\r\n return False\r\n\r\n # If we have no concatenation, we need to have 8 fields with 7 ':'.\r\n if '::' not in ip_str and ip_str.count(':') != 7:\r\n # We might have an IPv4 mapped address.\r\n if ip_str.count('.') != 3:\r\n return False\r\n\r\n ip_str = _explode_shorthand_ip_string(ip_str)\r\n\r\n # Now that we have that all squared away, let's check that each of the\r\n # hextets are between 0x0 and 0xFFFF.\r\n for hextet in ip_str.split(':'):\r\n if hextet.count('.') == 3:\r\n # If we have an IPv4 mapped address, the IPv4 portion has to\r\n # be at the end of the IPv6 portion.\r\n if not ip_str.split(':')[-1] == hextet:\r\n return False\r\n if not is_valid_ipv4_address(hextet):\r\n return False\r\n else:\r\n try:\r\n # a value error here means that we got a bad hextet,\r\n # something like 0xzzzz\r\n if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF:\r\n return False\r\n except ValueError:\r\n return False\r\n return True",
"def is_valid_ipv4_address(address):\n\n if not isinstance(address, (bytes, str_type)):\n return False\n\n # checks if theres four period separated values\n\n if address.count('.') != 3:\n return False\n\n # checks that each value in the octet are decimal values between 0-255\n for entry in address.split('.'):\n if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:\n return False\n elif entry[0] == '0' and len(entry) > 1:\n return False # leading zeros, for instance in '1.2.3.001'\n\n return True",
"def check_address_format(address):\n if len(address) != 42 or address[:2] != '0x':\n return False\n\n for ch in address[2:]:\n if ch not in \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\":\n return False\n\n return True",
"def _check_logic_syntax(string):\n return logExp.matches(string)"
] |
[
"0.62597406",
"0.61697423",
"0.6072746",
"0.60350204",
"0.575354",
"0.57534987",
"0.5701303",
"0.5671702",
"0.5651911",
"0.5631964",
"0.56221384",
"0.55761224",
"0.55722475",
"0.55467844",
"0.55360466",
"0.5512623",
"0.54971683",
"0.5479753",
"0.54710925",
"0.543337",
"0.54208523",
"0.542048",
"0.5333435",
"0.5318258",
"0.5310076",
"0.52916116",
"0.5290824",
"0.52720034",
"0.5267924",
"0.52477086",
"0.52396536",
"0.5235663",
"0.52142525",
"0.5208906",
"0.5158011",
"0.51565707",
"0.5150027",
"0.5149107",
"0.5148703",
"0.5142467",
"0.5137993",
"0.5132585",
"0.5127695",
"0.5112911",
"0.5111841",
"0.5084113",
"0.5063951",
"0.5063916",
"0.5063833",
"0.50555134",
"0.5048406",
"0.5034834",
"0.5031303",
"0.50312966",
"0.5012842",
"0.50108325",
"0.5010289",
"0.5004859",
"0.5001669",
"0.49988928",
"0.4991441",
"0.49793977",
"0.4973077",
"0.49707076",
"0.49651232",
"0.49568346",
"0.4956447",
"0.49455318",
"0.49398673",
"0.49094915",
"0.4908743",
"0.49050185",
"0.49035046",
"0.49014983",
"0.49007487",
"0.488947",
"0.48765114",
"0.4870601",
"0.48626977",
"0.4862453",
"0.48485795",
"0.48465067",
"0.48381004",
"0.48337314",
"0.48314112",
"0.48245457",
"0.48143822",
"0.4807985",
"0.4803679",
"0.48028043",
"0.48008507",
"0.4798089",
"0.47975463",
"0.47957626",
"0.47932065",
"0.47909364",
"0.47896466",
"0.47894508",
"0.4785174",
"0.47773787"
] |
0.5759088
|
4
|
This function determines the common supported protocol version: the highest version supported by the RPCS server that lies within the range spanned by the first integer of the earliest supported protocol version and the first integer of the latest supported protocol version.
|
def determine_supported_protocol(self, earliest, latest):
    earliest = int(earliest.split('.')[0])
    latest = int(latest.split('.')[0])
    if earliest <= latest:
        supported = range(earliest, latest + 1)
        for version in (reversed(supported)):
            if version in RPCS.SUPPORTED_PROTOCOL_VERSIONS:
                return str(version)

    # If no common protocol version is found, raise fatal error
    raise ClientRequestError('NoValidProtocolVersionInCommon')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compare_protocol_versions(self, session):\n # First parse protocol version strings to check for invalid formatting\n invalid_string = self.parse_protocol_version(\n [self.earliest_protocol_version, self.latest_protocol_version])\n if invalid_string is not None:\n # Error during protocol string parsing\n data = ('earliest_protocol_version'\n if invalid_string == self.earliest_protocol_version else 'latest_protocol_version')\n raise ClientRequestError('InvalidParameterValue', data=data)\n\n # Check if protocol version is supported and define the one to use\n self.protocol_version = self.determine_supported_protocol(\n self.earliest_protocol_version, self.latest_protocol_version)",
"def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError",
"def do_protocol_version(self):\n return \"2\", True",
"def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version",
"async def get_protocol_version(self):\n if self.query_reply_data.get(PrivateConstants.REPORT_VERSION) == '':\n await self._send_command([PrivateConstants.REPORT_VERSION])\n while self.query_reply_data.get(\n PrivateConstants.REPORT_VERSION) == '':\n await asyncio.sleep(self.sleep_tune)\n return self.query_reply_data.get(PrivateConstants.REPORT_VERSION)",
"async def get_protocol_version(self):\n if self.query_reply_data.get(PrivateConstants.REPORT_VERSION) == '':\n await self._send_command([PrivateConstants.REPORT_VERSION])\n while self.query_reply_data.get(\n PrivateConstants.REPORT_VERSION) == '':\n await asyncio.sleep(self.sleep_tune)\n return self.query_reply_data.get(PrivateConstants.REPORT_VERSION)",
"def parse_protocol_version(self, version_string_list):\n # Verify for every provided string if it is in proper versioning format\n for version_string in version_string_list:\n\n try:\n parsed_version_string = version_string.split('.')\n if len(parsed_version_string) == 1 and version_string.isdigit():\n # No dots in version string, it is a simple integer.\n continue\n\n StrictVersion(version_string)\n\n except (AttributeError, ValueError):\n LOG.debug('Invalid protocol version string provided')\n return version_string\n\n # Check for malformatting\n for i in range(len(parsed_version_string)):\n if len(parsed_version_string[i]) > 1:\n if parsed_version_string[i][0] == '0': # Leading 0's\n return version_string\n if len(parsed_version_string[i]) < 1: # Empty strings\n return version_string\n\n # Protocol version formating: OK\n return None",
"def supported_marshaller_api_versions() -> Tuple[str]:\n return (\"1.0\",)",
"def getNativeChangesetVersion(protocolVersion):\n # Add more versions as necessary, but do remember to add them to\n # netclient's FILE_CONTAINER_* constants\n if protocolVersion < 38:\n return filecontainer.FILE_CONTAINER_VERSION_NO_REMOVES\n elif protocolVersion < 43:\n return filecontainer.FILE_CONTAINER_VERSION_WITH_REMOVES\n # Add more changeset versions here as the currently newest client is\n # replaced by a newer one\n return filecontainer.FILE_CONTAINER_VERSION_FILEID_IDX",
"def _supported_versions(self, jarm_details, grease):\n if (jarm_details[7] == \"1.2_SUPPORT\"):\n # TLS 1.3 is not supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\"]\n else:\n # TLS 1.3 is supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\", b\"\\x03\\x04\"]\n\n # Change supported version order, by default, the versions are from\n # oldest to newest.\n if jarm_details[8] != \"FORWARD\":\n tls = self._cipher_mung(tls, jarm_details[8])\n\n # Assemble the extension.\n ext = b\"\\x00\\x2b\"\n # Add GREASE if applicable.\n if grease:\n versions = self._choose_grease()\n else:\n versions = b\"\"\n\n for version in tls:\n versions += version\n\n second_length = len(versions)\n first_length = second_length+1\n ext += struct.pack(\">H\", first_length)\n ext += struct.pack(\">B\", second_length)\n ext += versions\n\n return ext",
"def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]",
"def check_capability_negotiation(\n self, environ, start_response, response_headers):\n ua = sa = None\n if \"HTTP_DATASERVICEVERSION\" in environ:\n major, minor, ua = core.parse_dataservice_version(\n environ[\"HTTP_DATASERVICEVERSION\"])\n else:\n major = 2\n minor = 0\n if \"HTTP_MAXDATASERVICEVERSION\" in environ:\n # (unused max_minor)\n max_major, max_minor, sa = core.parse_max_dataservice_version(\n environ[\"HTTP_MAXDATASERVICEVERSION\"]) # noqa\n else:\n max_major = major\n if major > 2 or (major == 2 and minor > 0):\n # we can't cope with this request\n return None\n elif max_major >= 2:\n response_headers.append(\n ('DataServiceVersion', '2.0; pyslet %s' % info.version))\n return 2\n else:\n response_headers.append(\n ('DataServiceVersion', '1.0; pyslet %s' % info.version))\n return 1",
"def get_server_version():\n url_address = 'https://raw.githubusercontent.com/muhammadfredo/FrMaya/master/FrMaya/version.py'\n url_data = urllib2.urlopen(url_address).read(200)\n result = re.search(r'(\\d+), (\\d+), (\\d+)', url_data, re.MULTILINE)\n if result:\n version_list = [int(v) for v in result.groups()]\n return version_list\n else:\n raise ValueError('Cannot get server version!!!')",
"def test_get_protocol_version_name(self):\n server, client = loopback()\n client_protocol_version_name = client.get_protocol_version_name()\n server_protocol_version_name = server.get_protocol_version_name()\n\n assert isinstance(server_protocol_version_name, str)\n assert isinstance(client_protocol_version_name, str)\n\n assert server_protocol_version_name == client_protocol_version_name",
"def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION",
"def RemoteVersion(self):\n try:\n ver = self.metamanager.Version(connectme_pb2.VersionRequest())\n return (ver.major, ver.minor)\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e",
"def get_protocol_version(api_url : str, session : Optional[requests.Session] = None) -> ProtocolVersionResponse:\n return protocolVersion(api_url, session)",
"def protocol_version_9():\n print('Setting protocol version to 9')\n upgrade('protocolversion', 'protocol_version', 9)",
"def version_compare(compare_ver, min_version, max_version):\n if max_version == \"*\":\n return True\n if max_version == \"-\" or not max_version:\n max_version = \"0\"\n if not min_version or min_version == \"*\" or min_version == \"-\":\n min_version = \"0\"\n if compare_ver == \"-\" or compare_ver == \"*\":\n compare_ver = \"0\"\n if compare_ver == min_version or compare_ver == max_version:\n return True\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n # If all versions follow proper versioning then perform a simple numerical comparison\n if len(compare_ver_parts) == len(min_version_parts) and len(\n compare_ver_parts\n ) == len(max_version_parts):\n compare_ver_num = normalise_num(compare_ver, len(compare_ver_parts))\n min_version_num = normalise_num(min_version, len(compare_ver_parts))\n max_version_num = normalise_num(max_version, len(compare_ver_parts))\n if compare_ver_num >= min_version_num and compare_ver_num <= max_version_num:\n return True\n\n normal_len = len(compare_ver_parts)\n if len(min_version_parts) > normal_len:\n normal_len = len(min_version_parts)\n if len(max_version_parts) > normal_len:\n normal_len = len(max_version_parts)\n\n # Normalise the version numbers to be of same length\n compare_ver = normalise_version_str(compare_ver, normal_len)\n min_version = normalise_version_str(min_version, normal_len)\n max_version = normalise_version_str(max_version, normal_len)\n\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n for i in range(0, normal_len):\n if (\n not compare_ver_parts[i].isdigit()\n or not min_version_parts[i].isdigit()\n or not max_version_parts[i].isdigit()\n ):\n if (\n compare_ver_parts[i] == min_version_parts[i]\n and compare_ver_parts[i] == max_version_parts[i]\n ):\n continue\n else:\n return False\n elif int(compare_ver_parts[i]) >= int(min_version_parts[i]) and int(\n compare_ver_parts[i]\n ) <= int(max_version_parts[i]):\n continue\n else:\n return False\n return True",
"def version_max():\n return VERSION_MAX",
"def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected",
"def __get_best_version(self):\n\t\tif self.length < 32:\n\t\t\treturn 2 # version 2\n\t\telif self.length < 53:\n\t\t\treturn 3 # version 3\n\t\telif self.length < 78:\n\t\t\treturn 4 # version 4\n\t\telif self.length < 106:\n\t\t\treturn 5 # version 5\n\t\telif self.length < 134:\n\t\t\treturn 6 # version 6\n\t\telse:\n\t\t\treturn \"Too long data\"",
"def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test",
"def check_protocol_version(self):\n try:\n protocol_version = self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))",
"def getAPIVersion(self, req):\n import re\n import tracrpc\n match = re.match(r'([0-9]+)\\.([0-9]+)\\.([0-9]+)', tracrpc.__version__)\n return map(int, match.groups())",
"def get_version(client):\n version = client.info()['version']['number']\n version = version.split('-')[0]\n if len(version.split('.')) > 3:\n version = version.split('.')[:-1]\n else:\n version = version.split('.')\n return tuple(map(int, version))",
"def get_min_cli_version(k8s_cli):\n return MIN_OC_VERSION_SUPPORT_RETRIES if (k8s_cli and k8s_cli.endswith(OC_K8S_CLI))\\\n else MIN_KUBECTL_VERSION_SUPPORT_RETRIES",
"def compare_versions(current_version, supported_version):\n try:\n current = current_version.split(\".\")\n supported = supported_version.split(\".\")\n\n if int(current[0]) < int(supported[0]):\n return False\n if int(current[0]) > int(supported[0]):\n return True\n return int(current[1]) >= int(supported[1])\n # pylint: disable=W0703\n except Exception:\n logger.info(\"issues parsing version\")\n return False",
"def get_friendly_of_version(self, ofproto):\n if ofproto.OFP_VERSION == 1:\n _of_version = \"1.0\"\n elif ofproto.OFP_VERSION == 4:\n _of_version = \"1.3\"\n else:\n _of_version = \"Unknown version \" + \\\n str(ofproto.OFP_VERSION)\n return _of_version",
"def select_versions(self):\n super(ChannelBackend, self).select_versions()\n return [('1.1', '1.1')]",
"def check_conventions_version_number(ds, attr, conv_type, min_ver, max_ver):\n\n if attr not in ds.ncattrs():\n return 0\n global_attr = getattr(ds, attr)\n\n version = None\n global_attr_split = global_attr.split(' ')\n for conv in global_attr_split:\n if conv_type in conv:\n version = float(re.findall(r\"[+]?\\d*\\.\\d+|\\d+\", conv)[0])\n\n if not version:\n return 1\n\n range_check = None\n if conv_type == 'CF':\n range_check = min_ver <= version <= max_ver\n elif conv_type == 'ATMODAT':\n range_check = (version == min_ver) and (version == max_ver)\n\n if not range_check:\n return 2\n else:\n return 3",
"def get_ip_version(network):\r\n if netaddr.IPNetwork(network).version == 6:\r\n return \"IPv6\"\r\n elif netaddr.IPNetwork(network).version == 4:\r\n return \"IPv4\"",
"def get_supported_protocol_enum(self, supported_protocol):\n\n supported_protocol = \"MULTI_PROTOCOL\" if \\\n supported_protocol == \"MULTIPROTOCOL\" else supported_protocol\n if supported_protocol in utils.FSSupportedProtocolEnum.__members__:\n return utils.FSSupportedProtocolEnum[supported_protocol]\n else:\n errormsg = \"Invalid choice {0} for supported_protocol\".format(\n supported_protocol)\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)",
"async def get_supported_versions(self) -> dict:\n return await self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getSupportedVersions\", API_VERSION),\n filter=attr.filters.include(attr.fields(Body).method),\n ),\n )",
"def protocol(self):\n v = uint16_packer.unpack(self[2:4])[0]\n mask = 0b111111111111\n return v & mask",
"def get_required_ovs_version(self):\n return self.get_required_version(\"Open vSwitch\", self.openshift_to_ovs_version)",
"def python_version_check():\n min_version_list = PYTHON_MIN_VERSION.split(\".\")\n # Truncate if the list is more the 4 items\n if len(min_version_list) > 4:\n min_version_list = min_version_list[:4]\n # Fill if the list is less then 4 items\n if len(min_version_list) == 1:\n min_version_list.append(\"0\")\n if len(min_version_list) == 2:\n min_version_list.append(\"0\")\n if len(min_version_list) == 3:\n min_version_list.append(\"f0\")\n # Calculate the minimum version and an integer, which, when displayed as\n # hex, is easily recognised as the version. E.g. 0x30502f0 is 3.5.2\n min_version_value = 0\n for index, item in enumerate(min_version_list[::-1]):\n min_version_value = min_version_value + int(item, 16) * 2**(index * 8)\n if debug: print(\"Python Version Minimum:{}, Decimal:{}, Hex:{}\"\n .format(PYTHON_MIN_VERSION, min_version_value,\n hex(min_version_value)))\n # test value and exit if below minimum revision\n if sys.hexversion < min_version_value:\n print(\"Python Version: {}. Required minimum version is: {}. Exiting...\"\n .format(sys.version.split(\" \")[0], PYTHON_MIN_VERSION))\n sys.exit()",
"def get_ip_version(network):\n if netaddr.IPNetwork(network).version == 6:\n return \"IPv6\"\n elif netaddr.IPNetwork(network).version == 4:\n return \"IPv4\"",
"def guess_version(self):\n\t\ttry:\n\t\t\tself.hdf5file[\"/Analyses/Basecall_2D_%03d/BaseCalled_template\" % (self.group)]\n\t\t\treturn 'classic'\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.hdf5file[\"/Analyses/Basecall_1D_%03d/BaseCalled_template\" % (self.group)]\n\t\t\treturn 'metrichor1.16'\n\t\texcept KeyError:\n\t\t\tpass\n\n\t\t# less likely\n try:\n self.hdf5file[\"/Analyses/Basecall_RNN_1D_%03d/BaseCalled_template\" % (self.group)]\n return 'r9rnn'\n except KeyError:\n pass\n\n\t\treturn 'prebasecalled'",
"def test_tls_client_minimum_set(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")\n\n # Also test a string version\n config = {\"federation_client_minimum_tls_version\": \"1\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": \"1.2\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")",
"def version_check(version):\n return {\n 1: 'OF10', # 0x01 -> OF1.0\n 3: 'OF12', # 0x03 -> OF1.2\n 4: 'OF13', # 0x04 -> OF1.3\n 5: 'OF14', # 0x05 -> OF1.4\n 6: 'OF15', # 0x06 -> OF1.5\n }.get(version, 0)",
"def get_version_number():\n return [0, 1, 0]",
"def version(self):\n done, data = self._request('GV')\n if done:\n return {\n 'firmware': data[0],\n 'protocol': data[1]\n }\n\n raise EvseError",
"def proto_check(proto):\n # Check for TCP\n if proto == 6:\n return 'tcp'\n # Check for UDP\n elif proto == 17:\n return 'udp'\n else:\n return None",
"def getVersionName(self):\r\n if self.version == (3,0):\r\n return \"SSL 3.0\"\r\n elif self.version == (3,1):\r\n return \"TLS 1.0\"\r\n elif self.version == (3,2):\r\n return \"TLS 1.1\"\r\n else:\r\n return None",
"def parse_protocol_header(stream: BytesIO) -> Tuple[int, int, int]:\n prefix, *version = unpack('>5sBBB', _read(stream, 8))\n if prefix != b'AMQP\\x00':\n raise ValueError(\"wrong protocol, expected b'AMQP\\x00', got {}\".format(\n prefix\n ))\n return version",
"def get_version():\n return '%d.%d.%d' % version_info",
"def _choice_protocol(self):\n # space to add more complex choice algorithms, if desired\n return 0",
"def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol",
"def _get_cfg_v(self):\n if CONFIG_VERSION_KEY in self[CONFIG_KEY]:\n v_str = self[CONFIG_KEY][CONFIG_VERSION_KEY]\n if not isinstance(v_str, str):\n raise InvalidConfigFileException(\"{} must be a string\".\n format(CONFIG_VERSION_KEY))\n v_bundle = v_str.split(\".\")\n assert len(v_bundle) == 3, \\\n InvalidConfigFileException(\"Version string is not tripartite\")\n try:\n v_bundle = list(map(int, v_bundle))\n except ValueError:\n raise InvalidConfigFileException(\"Version string elements are \"\n \"not coercible to integers\")\n if v_bundle[0] < 2:\n if SAMPLE_MODS_KEY in self[CONFIG_KEY]:\n raise InvalidConfigFileException(\n \"Project configuration file ({p}) subscribes to {c} \"\n \">= 2.0.0, since '{m}' section is defined. Set {c} to \"\n \"2.0.0 in your config\".format(p=self[CONFIG_FILE_KEY],\n c=CONFIG_VERSION_KEY,\n m=SAMPLE_MODS_KEY))\n else:\n self._format_cfg()\n return [\"2\", \"0\", \"0\"]\n return list(map(str, v_bundle))\n else:\n self._format_cfg()\n return [\"2\", \"0\", \"0\"]",
"def version_min():\n return VERSION_MIN",
"def get_version(self, value):\n version = []\n for i in range(2):\n version = [(value >> (i * 16)) & 0xFFFF] + version\n return '.'.join([str(x) for x in version])",
"def get_supported_versions(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getSupportedVersions\", API_VERSION),\n filter=attr.filters.include(attr.fields(Body).method),\n ),\n )",
"def get_server_version(object = server_status_req):\n try:\n response = urllib2.urlopen(object).read()\n server_connect = json.loads(response)\n return server_connect['GetSeverStatus']\n except URLError, e:\n print 'Error: No Response From Server.'",
"def version(self):\n return self.rpc.call(MsfRpcMethod.CoreVersion)",
"def parse_compute_version(compute_version):\n split_ver = compute_version.split(\".\")\n try:\n major = int(split_ver[0])\n minor = int(split_ver[1])\n return major, minor\n except (IndexError, ValueError) as err:\n # pylint: disable=raise-missing-from\n raise RuntimeError(\"Compute version parsing error: \" + str(err))",
"def getProtocol(self) -> str:\n ...",
"def get_version():\n return 1",
"def micro_Version(self):\n return tuple(map(ord, self._serial_io(b'\\x56', 2)[0:2]))",
"def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n main1 = 0\n main2 = 0\n branch1 = 0\n branch2 = 0\n list1 = version1.split('.')\n list2 = version2.split('.')\n for i in range(max(len(list1),len(list2))):\n item1 = 0 if (i > len(list1)-1) else int(list1[i])\n item2 = 0 if (i > len(list2)-1) else int(list2[i])\n if item1 > item2:\n return 1\n if item2 > item1:\n return -1\n return 0",
"def _fetch_api_versions(self):\n log.debug(\"Fetch SASL authentication api versions.\")\n self._broker_connection.request(ApiVersionsRequest())\n response = ApiVersionsResponse(self._broker_connection.response())\n\n self.handshake_version = response.api_versions[SaslHandshakeRequest.API_KEY].max\n self.auth_version = response.api_versions.get(SaslAuthenticateRequest.API_KEY, None)\n\n self.handshake_version = min(self.MAX_HANDSHAKE_VERSION, self.handshake_version)\n if self.auth_version is not None:\n self.auth_version = min(self.auth_version.max, self.MAX_AUTH_VERSION)\n log.debug(\n \"Determinded handshake api version {} and authenticate api version {}\".format(\n self.handshake_version, self.auth_version\n )\n )",
"def test_low_client(self):\n version, file = self.get('', '3000000001100',\n self.app, self.platform)\n assert version == self.version_1_0_2",
"def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")",
"def find_py_between( minver, maxver ):\n\n minver = canon_ver(minver)\n maxver = canon_ver(maxver)\n\n for pyexe, info in get_python_verlist():\n \n thisver = canon_ver(info[2])\n\n if thisver >= minver and thisver <= maxver:\n return pyexe\n\n # can't satisfy requirement\n return None",
"def determine_senior_version(model_versions: dict) -> str:\r\n senior_ver = parse(\"0.0\")\r\n for ver in model_versions.values():\r\n if senior_ver < parse(ver):\r\n senior_ver = parse(ver)\r\n return str(senior_ver)",
"def get_highest_http(uri, https, upgrade=True):\n highest_http = '1.0'\n response_status = \"\"\n redirect = False\n location = \"\"\n port = 443 if https else 80\n use_https = https\n use_upgrade = upgrade\n host, path = get_host(uri)\n i_p = check_host_name(host)\n request_line = \"GET \"+ path +\" HTTP/1.1\\r\\n\"\n headers_line = \"Host: \"+ host+ \"\\r\\n\"\n\n upgrade_line = \"Connection: close\\r\\nUpgrade: h2c\\r\\n\\r\\n\" if not https \\\n else \"Connection: Close\\r\\nuser-agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US)\"+ \\\n \"AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.375.86 Safari/533.4\\r\\n\\r\\n\" #[3]\n\n h11_request = (request_line+headers_line+upgrade_line).encode()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n if https:\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n ctx.set_alpn_protocols(['h2', 'http/1.1', 'http/1.0'])\n ssl_sock = ctx.wrap_socket(sock, server_hostname=host)\n sock = ssl_sock\n try:\n sock.settimeout(5)\n sock.connect((i_p, port))\n sock.settimeout(None)\n except socket.error:\n print(\"The socket can't seem to connect,\"+\n \"even though host name was resolved for the provided URI\")\n sys.exit()\n except socket.timeout:\n print(\"A timeout occured because the host failed to connect for 5 seconds\")\n if https:\n proto = sock.selected_alpn_protocol()\n if proto == 'h2':\n highest_http = '2.0'\n sock.close()\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ctx = ssl.create_default_context()\n ctx.set_alpn_protocols(['http/1.1', 'http/1.0'])\n ssl_sock = ctx.wrap_socket(sock, server_hostname=host)\n sock = ssl_sock\n sock.connect((i_p, port))\n\n rec = send_and_recieve(sock, h11_request)\n sock.close()\n status_line = rec[0]\n response_headers = rec[1:]\n\n if highest_http != '2.0':\n highest_http = \"1.0\" if 'HTTP/1.0' in status_line else \"1.1\"\n if not https and '101' in status_line:\n highest_http = \"2.0\"\n\n\n if '200' not in status_line and '204' not in status_line and '205' not in status_line:\n if '302' in status_line or '301' in status_line:\n\n redirect = True\n\n for header in response_headers:\n if 'Location' in header:\n if 'https' in header:\n use_https = True\n redirect = True\n location = (header.split(\" \")[1])\n if location == uri:\n print(\"This site keeps redirecting to itself and returning 302's Something is wrong\")\n redirect = False\n break\n elif '101' in status_line:\n use_upgrade = False\n location = uri\n redirect = True\n elif '500' in status_line or '505' in status_line:\n print(\"Recieved a 5xx response from the server at location: \" + uri +\" exiting now...\")\n sys.exit()\n elif '404' in status_line:\n print(\"The specified host exists but the path \" + path + \" was not found\")\n sys.exit()\n else:\n print('An unexpected response status of ' +status_line.split(\" \")[1] +' was received from site \"' + uri +'\"')\n sys.exit()\n\n response_status = status_line.split(\" \")[1]\n tup = (\n response_status,\n response_headers,\n highest_http,\n redirect,\n location, use_https,\n use_upgrade\n )\n return tup",
"def protocol(self):\n return self._host[CONF_PROTOCOL]",
"def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10",
"def get_server_version(self):\n return self.__aceQLHttpApi.get_server_version()",
"def ClientVersion(self):\n return (self.VERSION_MAJOR, self.VERSION_MINOR)",
"def set_version(self, protocol_version):\n self.version = protocol_version\n self.version_bytes = str(protocol_version).encode(\"latin1\")\n self.version_header = self.version_bytes + PROTOCOL_3x_HEADER\n if protocol_version == 3.2: # 3.2 behaves like 3.3 with type_0d\n # self.version = 3.3\n self.dev_type = \"type_0d\"\n elif protocol_version == 3.4:\n self.dev_type = \"v3.4\"",
"def format_tls_string(version):\n version = version.replace(\"TLS\", \"TLS \")\n protocol, number = version.split()\n return f\"{protocol} {int(number) / 10}\"",
"def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)",
"def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value",
"def test_raxmlHPC_supported_version(self):\r\n acceptable_version = [(7, 3, 0), (7, 3, 0)]\r\n self.assertTrue(which('raxmlHPC'),\r\n \"raxmlHPC not found. This may or may not be a problem depending on \" +\r\n \"which components of QIIME you plan to use.\")\r\n command = \"raxmlHPC -v | grep version\"\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n version_string = stdout.strip().split(' ')[4].strip()\r\n try:\r\n version = tuple(map(int, version_string.split('.')))\r\n pass_test = version in acceptable_version\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported raxmlHPC version. %s is required, but running %s.\"\r\n % ('.'.join(map(str, acceptable_version)), version_string))",
"def getSupportedApiVersions(self):\n return self.supported_api_version",
"def version(self):\n version = self.get_rpc().getnetworkinfo()[\"subversion\"]\n version = version.replace(\"/\", \"\").replace(\"Satoshi:\", \"v\")\n return version",
"def version(cli, platform):\n version_client = PolyaxonClient().version\n cli = cli or not any([cli, platform])\n if cli:\n try:\n server_version = version_client.get_cli_version()\n except AuthorizationError:\n session_expired()\n sys.exit(1)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get cli version.')\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n cli_version = get_current_version()\n Printer.print_header('Current cli version: {}.'.format(cli_version))\n Printer.print_header('Supported cli versions:')\n dict_tabulate(server_version.to_dict())\n\n if platform:\n try:\n platform_version = version_client.get_platform_version()\n except AuthorizationError:\n session_expired()\n sys.exit(1)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get platform version.')\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n chart_version = version_client.get_chart_version()\n Printer.print_header('Current platform version: {}.'.format(chart_version.version))\n Printer.print_header('Supported platform versions:')\n dict_tabulate(platform_version.to_dict())",
"def pyversion(ref=None):\n import platform\n ver = platform.python_version()\n if ref:\n return [\n int(x) for x in ver.split(\".\")[:2]\n ] >= [\n int(x) for x in ref.split(\".\")[:2]\n ]\n else: return ver",
"def get_protocols(self):\r\n\r\n return None",
"def supports_http_1_1():",
"def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)",
"def test_tls_min_version(self):\n self.x509 = x509main(host=self.cluster.master)\n self.x509.generate_multiple_x509_certs(servers=self.cluster.servers)\n for server in self.cluster.servers:\n _ = self.x509.upload_root_certs(server)\n self.x509.upload_node_certs(servers=self.cluster.servers)\n self.x509.delete_unused_out_of_the_box_CAs(self.cluster.master)\n self.x509.upload_client_cert_settings(server=self.cluster.servers[0])\n\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.3')\n if not status:\n self.fail(\"Setting tls min version to 1.3 failed with content {0}\".format(content))\n\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"fail\")\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.3\", expect=\"pass\")\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=self.cluster.servers)\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.2')\n if not status:\n self.fail(\"Setting tls min version to 1.2 failed with content {0}\".format(content))\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"pass\")\n\n self.x509 = x509main(host=self.cluster.master)\n self.x509.teardown_certs(servers=self.cluster.servers)",
"async def get_version(self) -> str or bool:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n\n # tell the user we are getting the version\n self.add_to_output(\"Getting version...\")\n retries = 0\n while True:\n # open a connection to the [cgminer, bmminer, bosminer] API port (4028)\n connection_fut = asyncio.open_connection(self.ip, 4028)\n try:\n # get reader and writer streams from connection\n reader, writer = await asyncio.wait_for(connection_fut, timeout=5)\n # send the standard version command (JSON)\n writer.write(b'{\"command\":\"version\"}')\n # wait until command is finished sending\n await writer.drain()\n # read the returned data\n data = await reader.read(4096)\n # let the user know we recieved data\n self.add_to_output(\"Recieved data...\")\n # close the writer\n writer.close()\n # make sure the writer is fully closed\n await writer.wait_closed()\n # load the returned data (JSON), and remove the null byte at the end\n data_dict = json.loads(data[:-1].decode('utf-8'))\n # tell the user the version of the miner\n self.add_to_output(f'Version is {data_dict[\"VERSION\"][0][list(data_dict[\"VERSION\"][0].keys())[1]]}...')\n if \"BOSminer+\" in data_dict[\"VERSION\"][0].keys() or \"BOSminer\" in data_dict[\"VERSION\"][0].keys():\n # get/create ssh connection to miner\n conn = await self.get_connection(\"root\", \"admin\")\n # send the command and store the result\n try:\n result = await conn.run(\"cat /etc/bos_version\")\n version_base = result.stdout\n version_base = version_base.strip()\n version_base = version_base.split(\"-\")\n version = version_base[-2]\n if version == NEWEST_VERSION:\n return \"New\"\n else:\n return \"BOS+\"\n except:\n return \"BOS+\"\n else:\n return \"Antminer\"\n except asyncio.exceptions.TimeoutError:\n # we have no version, the connection timed out\n self.add_to_output(\"Get version failed...\")\n return False\n except ConnectionRefusedError:\n # add to retry times\n retries += 1\n # connection was refused, tell the user\n self.add_to_output(\"Connection refused, retrying...\")\n # make sure it doesnt get stuck here\n if retries > 3:\n self.add_to_output('Connection refused, attempting install...')\n return \"Antminer\"\n await asyncio.sleep(3)\n except:\n self.add_to_output(\"Unknown error getting version, attempting install...\")\n return \"Antminer\"",
"def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)",
"def tls_max_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tls_max_version\")",
"def family_versions(self):\n return ['1.0']",
"def supported_version(version, minimum, maximum):\n if minimum and StrictVersion(version) < StrictVersion(minimum):\n return False\n\n if maximum and StrictVersion(version) > StrictVersion(maximum):\n return False\n\n return True",
"def rpc_version(self):\n\t\tvinfo = {'version': version.version, 'version_info': version.version_info._asdict()}\n\t\tvinfo['rpc_api_version'] = version.rpc_api_version\n\t\treturn vinfo",
"def _get_api_version(self):\n with self.nb_session.get(\n self.nb_api_url, timeout=10,\n verify=(not settings.NB_INSECURE_TLS)) as resp:\n result = float(resp.headers[\"API-Version\"])\n log.info(\"Detected NetBox API v%s.\", result)\n return result",
"def solver_version(self):\n major = self._stub.List(self._message).solver_major_version\n minor = self._stub.List(self._message).solver_minor_version\n version = str(major) + \".\" + str(minor)\n return version",
"def best_version(version, versions):\n compatibles = (v for v in versions if v.startswith(version))\n sorted_compatibles = sorted(compatibles, reverse=True, key=_version_key)\n return next(iter(sorted_compatibles), None)",
"def check_python_version(match, current=None):\n if current is None:\n current = list(sys.version_info[:3])\n if not isinstance(match, list):\n match = [match]\n for m in match:\n minimal = False\n if isinstance(m, float):\n m = str(m)\n if m.endswith(\"+\"):\n minimal = True\n m = m[:-1]\n # assert m[0].isdigit()\n # assert m[-1].isdigit()\n m = [int(x) for x in m.split(\".\")]\n current_len = current[: len(m)]\n # print(m, current, current_len)\n if minimal:\n if current_len >= m:\n return True\n else:\n if current_len == m:\n return True\n return False",
"def pyzmq_version_info():\n import re\n parts = re.findall('[0-9]+', __version__)\n parts = [ int(p) for p in parts ]\n if 'dev' in __version__:\n parts.append(float('inf'))\n return tuple(parts)",
"def _get_supported_grype_db_version() -> str:\n grype_wrapper = GrypeWrapperSingleton.get_instance()\n try:\n version_response = grype_wrapper.get_grype_version()\n except CommandException as exc:\n raise GrypeVersionCommandError() from exc\n try:\n return str(version_response[\"supportedDbSchema\"])\n except KeyError as exc:\n raise InvalidGrypeVersionResponse(json.dumps(version_response)) from exc",
"def interfaceVersion( self ):\n\t\treturn 1",
"def trafficProtocol(self):\n #\n # TODO: Reimplement this if possible.\n #\n return client.trafficProtocol(self)",
"def version(self):\n r = requests.get(\"http://%s/api/version\" %(self.url), headers=self.headers)\n if r.status_code == 200:\n return True, r.content\n else:\n return False, {}",
"def test_VersionWire():\n # verRelayTxFalse and verRelayTxFalseEncoded is a version message as of\n # BIP0037Version with the transaction relay disabled.\n verRelayTxFalse = baseVersionBIP0037()\n verRelayTxFalse.disableRelayTx = True\n verRelayTxFalseEncoded = baseVersionBIP0037Encoded()\n verRelayTxFalseEncoded[-1] = 0\n\n bv = baseVersionBIP0037()\n tests = [\n (bv, bv, baseVersionBIP0037Encoded()),\n (verRelayTxFalse, verRelayTxFalse, verRelayTxFalseEncoded),\n ]\n\n for msgIn, msgOut, msgEnc in tests:\n # Encode the message to wire format.\n b = msgIn.btcEncode(wire.ProtocolVersion)\n assert b == msgEnc\n\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(msgEnc, wire.ProtocolVersion)\n assert sameMsgVersion(msg, msgOut)",
"def compare_versions(self, version1, version2):\n max_segments = max(len(version1.split(\".\")), len(version2.split(\".\")))\n return cmp(self.__normalize_version(version1, desired_segments=max_segments), self.__normalize_version(version2, desired_segments=max_segments))"
] |
[
"0.7081918",
"0.70504236",
"0.6895096",
"0.63376284",
"0.63264036",
"0.63264036",
"0.61619365",
"0.60787296",
"0.60647607",
"0.6016684",
"0.5973124",
"0.5794255",
"0.57867813",
"0.5767949",
"0.5725214",
"0.5694531",
"0.5666855",
"0.56289035",
"0.562115",
"0.5615987",
"0.5604014",
"0.5587643",
"0.5484007",
"0.547843",
"0.54692096",
"0.54465586",
"0.5434421",
"0.54283506",
"0.541454",
"0.5408799",
"0.54056",
"0.5349933",
"0.53410196",
"0.53089124",
"0.53073025",
"0.53043526",
"0.52942526",
"0.52898973",
"0.52746695",
"0.52692145",
"0.5268515",
"0.52639985",
"0.52565354",
"0.5251568",
"0.52471966",
"0.5239605",
"0.5233793",
"0.522129",
"0.52202934",
"0.5215805",
"0.5203077",
"0.52030045",
"0.5201807",
"0.51591563",
"0.5157518",
"0.5149703",
"0.5145795",
"0.5135185",
"0.5134351",
"0.5130322",
"0.51256466",
"0.5118394",
"0.511561",
"0.51101893",
"0.5102376",
"0.510138",
"0.50951505",
"0.50910944",
"0.5084594",
"0.50828314",
"0.5080374",
"0.5071295",
"0.50657976",
"0.5058183",
"0.5057636",
"0.504953",
"0.5039135",
"0.50372654",
"0.5036151",
"0.5031092",
"0.5029566",
"0.50275695",
"0.50230527",
"0.5013896",
"0.50129956",
"0.50104433",
"0.50091285",
"0.50082546",
"0.50016326",
"0.5001316",
"0.49876934",
"0.49871325",
"0.49820855",
"0.49739584",
"0.4970495",
"0.49617654",
"0.49613658",
"0.49572867",
"0.49532413",
"0.49508798"
] |
0.8174987
|
0
|
This function is responsible for parsing and validating the provided protocol versions and comparing them against the supported protocol versions.
|
def compare_protocol_versions(self, session):
# First parse protocol version strings to check for invalid formatting
invalid_string = self.parse_protocol_version(
[self.earliest_protocol_version, self.latest_protocol_version])
if invalid_string is not None:
# Error during protocol string parsing
data = ('earliest_protocol_version'
if invalid_string == self.earliest_protocol_version else 'latest_protocol_version')
raise ClientRequestError('InvalidParameterValue', data=data)
# Check if protocol version is supported and define the one to use
self.protocol_version = self.determine_supported_protocol(
self.earliest_protocol_version, self.latest_protocol_version)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _sanityCheckProtocolVersions(other):\n if other.minVersion > other.maxVersion:\n raise ValueError(\"Versions set incorrectly\")\n if other.minVersion not in KNOWN_VERSIONS:\n raise ValueError(\"minVersion set incorrectly\")\n if other.maxVersion not in KNOWN_VERSIONS:\n raise ValueError(\"maxVersion set incorrectly\")\n\n if other.maxVersion < (3, 4):\n other.versions = [i for i in other.versions if i < (3, 4)]",
"def parse_protocol_version(self, version_string_list):\n # Verify for every provided string if it is in proper versioning format\n for version_string in version_string_list:\n\n try:\n parsed_version_string = version_string.split('.')\n if len(parsed_version_string) == 1 and version_string.isdigit():\n # No dots in version string, it is a simple integer.\n continue\n\n StrictVersion(version_string)\n\n except (AttributeError, ValueError):\n LOG.debug('Invalid protocol version string provided')\n return version_string\n\n # Check for malformatting\n for i in range(len(parsed_version_string)):\n if len(parsed_version_string[i]) > 1:\n if parsed_version_string[i][0] == '0': # Leading 0's\n return version_string\n if len(parsed_version_string[i]) < 1: # Empty strings\n return version_string\n\n # Protocol version formating: OK\n return None",
"def determine_supported_protocol(self, earliest, latest):\n earliest = int(earliest.split('.')[0])\n latest = int(latest.split('.')[0])\n if earliest <= latest:\n supported = range(earliest, latest + 1)\n for version in (reversed(supported)):\n if version in RPCS.SUPPORTED_PROTOCOL_VERSIONS:\n return str(version)\n\n # If no common protocol version is found, raise fatal error\n raise ClientRequestError('NoValidProtocolVersionInCommon')",
"def do_protocol_version(self):\n return \"2\", True",
"def test_parse_version():\n version = VersionUtils.parse_version('9.5.3')\n assert version == VersionInfo(9, 5, 3)\n\n # Test #.# style versions\n v10_2 = VersionUtils.parse_version('10.2')\n assert v10_2 == VersionInfo(10, 2, 0)\n\n v11 = VersionUtils.parse_version('11')\n assert v11 == VersionInfo(11, 0, 0)\n\n # Test #beta# style versions\n beta11 = VersionUtils.parse_version('11beta3')\n assert beta11 == VersionInfo(11, 0, 0, prerelease='beta.3')\n\n assert v10_2 < beta11\n assert v11 > beta11\n\n # Test #rc# style versions\n version = VersionUtils.parse_version('11rc1')\n assert version == VersionInfo(11, 0, 0, prerelease='rc.1')\n\n # Test #nightly# style versions\n version = VersionUtils.parse_version('11nightly3')\n assert version == VersionInfo(11, 0, 0, 'nightly.3')\n\n v12_3_tde = VersionUtils.parse_version('12.3_TDE_1.0')\n assert v12_3_tde == VersionInfo(12, 3, 0)",
"def compare_versions(current_version, supported_version):\n try:\n current = current_version.split(\".\")\n supported = supported_version.split(\".\")\n\n if int(current[0]) < int(supported[0]):\n return False\n if int(current[0]) > int(supported[0]):\n return True\n return int(current[1]) >= int(supported[1])\n # pylint: disable=W0703\n except Exception:\n logger.info(\"issues parsing version\")\n return False",
"def protocol(ver):\r\n if ver == 1:\r\n return 1\r\n\r\n if ver == 2:\r\n return 2\r\n\r\n\r\n raise ValueError",
"def test_parse_version(self):\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B110SPC003'),\n [100, 1, 0, 110, 3],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012'),\n [100, 1, 0, 60, 12],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012_FB_3'),\n [100, 1, 0, 60, 12],\n )\n # Incorrect number of digits\n self.assertEqual(\n _parse_sw_version('BaiStation_V10R001C00B060SPC012'),\n None,\n )\n self.assertEqual(\n _parse_sw_version('XYZ123'),\n None,\n )\n self.assertEqual(\n _parse_sw_version(''),\n None,\n )",
"def _supported_versions(self, jarm_details, grease):\n if (jarm_details[7] == \"1.2_SUPPORT\"):\n # TLS 1.3 is not supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\"]\n else:\n # TLS 1.3 is supported.\n tls = [b\"\\x03\\x01\", b\"\\x03\\x02\", b\"\\x03\\x03\", b\"\\x03\\x04\"]\n\n # Change supported version order, by default, the versions are from\n # oldest to newest.\n if jarm_details[8] != \"FORWARD\":\n tls = self._cipher_mung(tls, jarm_details[8])\n\n # Assemble the extension.\n ext = b\"\\x00\\x2b\"\n # Add GREASE if applicable.\n if grease:\n versions = self._choose_grease()\n else:\n versions = b\"\"\n\n for version in tls:\n versions += version\n\n second_length = len(versions)\n first_length = second_length+1\n ext += struct.pack(\">H\", first_length)\n ext += struct.pack(\">B\", second_length)\n ext += versions\n\n return ext",
"def compare(v1=\"\", v2=\"\"):\n if any([v1 == \"\", v2 == \"\"]):\n return 'One or both versions are not provided.'\n\n characters1 = list(v1)\n characters2 = list(v2)\n\n if not characters1.index('.'):\n return 'v1 is in wrong format'\n if not characters2.index('.'):\n return 'v2 is in wrong format'\n\n def extract_number(characters):\n working_list = []\n resulting_list = []\n dot_index = characters.index('.')\n go_on = True\n for i in range(dot_index):\n if characters[i] == '0' and go_on:\n continue\n go_on = False\n working_list.append(characters[i])\n if not working_list:\n working_list.append('0')\n num_str = ''.join(working_list)\n resulting_list.append(num_str)\n resulting_list.append('.')\n working_list.clear()\n go_on = True\n for i in range(len(characters)-(dot_index+1)):\n index = i + (dot_index+1)\n if characters[index] == '0' and go_on:\n continue\n go_on = False\n working_list.append(characters[index])\n if not working_list:\n working_list.append('0')\n num_str = ''.join(working_list)\n resulting_list.append(num_str)\n return resulting_list\n\n list1 = extract_number(characters1)\n list2 = extract_number(characters2)\n\n def check(a_list):\n if a_list[0].isdigit() and a_list[2].isdigit():\n return True\n return False\n\n if not check(list1):\n return 'Invalid input - {}'.format(v1)\n if not check(list2):\n return 'Invalid input - {}'.format(v2)\n\n if list1[0] > list2[0]:\n return 'Version {0} is greater than Version {1}'.format(v1, v2)\n elif list1[0] < list2[0]:\n return 'Version {0} is smaller than Version {1}'.format(v1, v2)\n else:\n if list1[2] > list2[2]:\n return 'Version {0} is greater than Version {1}'.format(v1, v2)\n elif list1[2] < list2[2]:\n return 'Version {0} is smaller than Version {1}'.format(v1, v2)\n else:\n return 'Version {0} is equal to Version {1}'.format(v1, v2)",
"def version_compare(compare_ver, min_version, max_version):\n if max_version == \"*\":\n return True\n if max_version == \"-\" or not max_version:\n max_version = \"0\"\n if not min_version or min_version == \"*\" or min_version == \"-\":\n min_version = \"0\"\n if compare_ver == \"-\" or compare_ver == \"*\":\n compare_ver = \"0\"\n if compare_ver == min_version or compare_ver == max_version:\n return True\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n # If all versions follow proper versioning then perform a simple numerical comparison\n if len(compare_ver_parts) == len(min_version_parts) and len(\n compare_ver_parts\n ) == len(max_version_parts):\n compare_ver_num = normalise_num(compare_ver, len(compare_ver_parts))\n min_version_num = normalise_num(min_version, len(compare_ver_parts))\n max_version_num = normalise_num(max_version, len(compare_ver_parts))\n if compare_ver_num >= min_version_num and compare_ver_num <= max_version_num:\n return True\n\n normal_len = len(compare_ver_parts)\n if len(min_version_parts) > normal_len:\n normal_len = len(min_version_parts)\n if len(max_version_parts) > normal_len:\n normal_len = len(max_version_parts)\n\n # Normalise the version numbers to be of same length\n compare_ver = normalise_version_str(compare_ver, normal_len)\n min_version = normalise_version_str(min_version, normal_len)\n max_version = normalise_version_str(max_version, normal_len)\n\n compare_ver_parts = str(compare_ver).split(\".\")\n min_version_parts = str(min_version).split(\".\")\n max_version_parts = str(max_version).split(\".\")\n\n for i in range(0, normal_len):\n if (\n not compare_ver_parts[i].isdigit()\n or not min_version_parts[i].isdigit()\n or not max_version_parts[i].isdigit()\n ):\n if (\n compare_ver_parts[i] == min_version_parts[i]\n and compare_ver_parts[i] == max_version_parts[i]\n ):\n continue\n else:\n return False\n elif int(compare_ver_parts[i]) >= int(min_version_parts[i]) and int(\n compare_ver_parts[i]\n ) <= int(max_version_parts[i]):\n continue\n else:\n return False\n return True",
"def test_VersionOptionalFields():\n # onlyRequiredVersion is a version message that only contains the\n # required versions and all other values set to their default values.\n onlyRequiredVersion = minimumMsgVersion()\n\n onlyRequiredVersionEncoded = baseVersionEncoded()[:-55]\n\n # addrMeVersion is a version message that contains all fields through\n # the AddrMe field.\n addrMe = netaddress.NetAddress(\n ip=\"127.0.0.1\", port=8333, services=wire.SFNodeNetwork, stamp=0,\n )\n addrMeVersion = minimumMsgVersion()\n addrMeVersion.addrMe = addrMe\n\n addrMeVersionEncoded = baseVersionEncoded()[:-29]\n\n # nonceVersion is a version message that contains all fields through\n # the Nonce field.\n nonceVersion = minimumMsgVersion()\n nonceVersion.addrMe = addrMe\n nonceVersion.nonce = 123123 # 0x1e0f3\n nonceVersionEncoded = baseVersionEncoded()[:-21]\n\n # uaVersion is a version message that contains all fields through\n # the UserAgent field.\n uaVersion = minimumMsgVersion()\n uaVersion.addrMe = addrMe\n uaVersion.nonce = 123123\n uaVersion.userAgent = \"/dcrdtest:0.0.1/\"\n uaVersionEncoded = baseVersionEncoded()[:-4]\n\n # lastBlockVersion is a version message that contains all fields\n # through the LastBlock field.\n lastBlockVersion = minimumMsgVersion()\n lastBlockVersion.addrMe = addrMe\n lastBlockVersion.nonce = 123123\n lastBlockVersion.userAgent = \"/dcrdtest:0.0.1/\"\n lastBlockVersion.lastBlock = 234234 # 0x392fa\n lastBlockVersionEncoded = baseVersionEncoded()\n\n tests = [\n (onlyRequiredVersion, onlyRequiredVersionEncoded),\n (addrMeVersion, addrMeVersionEncoded),\n (nonceVersion, nonceVersionEncoded),\n (uaVersion, uaVersionEncoded),\n (lastBlockVersion, lastBlockVersionEncoded),\n ]\n\n for expMsg, buf in tests:\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(buf, wire.ProtocolVersion)\n assert sameMsgVersion(msg, expMsg)",
"def check_protocol_version(self):\n try:\n protocol_version = self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))",
"def check_http_request_validity(http_raw_data) -> HttpRequestState:\n\n global version\n r1 = http_raw_data.split('\\n')[0]\n r2 = http_raw_data.split('\\n')[1]\n\n if (re.search(\"GET\", r1) != None) and (re.search(\"/\", r1) != None) and (re.search(\"HTTP/1.0\", r1) != None) and (re.search(\":\", r2)):\n return HttpRequestState.GOOD\n\n if (re.search(\"GET\", r1) != None) and (re.search(\"http://\", r1) != None) and (re.search(\"HTTP/1.0\", r1) != None):\n return HttpRequestState.GOOD\n\n if (re.search(\"GET\", r1)!=None) and (re.search(\"/\", r1)!=None) and (re.search(\"HTTP/1.0\",r1)!=None) :\n if (re.search(\":\", r2) == None) :\n return HttpRequestState.INVALID_INPUT\n\n if(re.search(\"GOAT\", r1)!=None):\n return HttpRequestState.INVALID_INPUT\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\" , r1)!=None) and (re.search(\"/\",r1)!=None) and (re.search(\"HTTP/1.0\", r1) != None) and (re.search(\":\", r2)):\n\n return HttpRequestState.NOT_SUPPORTED\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\" ,r1)!=None) and (re.search(\"/\",r1)!=None) and (re.search(\"HTTP/1.0\",r1)!=None):\n return HttpRequestState.INVALID_INPUT\n\n if (re.search(\"HEAD\"or\"POST\" or \"PUT\", r1) != None) and (re.search(\"HTTP/1.0\", r1) == None) and (re.search(\":\", r2) != None):\n return HttpRequestState.INVALID_INPUT\n print(\"*\" * 50)\n print(\"[check_http_request_validity] Implement me!\")\n print(\"*\" * 50)\n\n return HttpRequestState.PLACEHOLDER",
"def supports_http_1_1():",
"def _check_compat(sock_info):\n ...",
"def test_get_protocol_version(self):\n server, client = loopback()\n client_protocol_version = client.get_protocol_version()\n server_protocol_version = server.get_protocol_version()\n\n assert isinstance(server_protocol_version, int)\n assert isinstance(client_protocol_version, int)\n\n assert server_protocol_version == client_protocol_version",
"def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions",
"def compare_versions(self, version1, version2):\n max_segments = max(len(version1.split(\".\")), len(version2.split(\".\")))\n return cmp(self.__normalize_version(version1, desired_segments=max_segments), self.__normalize_version(version2, desired_segments=max_segments))",
"def rangeCompare(reqtuple, provtuple):\n (reqn, reqf, (reqe, reqv, reqr)) = reqtuple\n (n, f, (e, v, r)) = provtuple\n if reqn != n:\n return 0\n\n # unversioned satisfies everything\n if not f or not reqf:\n return 1\n\n # and you thought we were done having fun\n # if the requested release is left out then we have\n # to remove release from the package prco to make sure the match\n # is a success - ie: if the request is EQ foo 1:3.0.0 and we have \n # foo 1:3.0.0-15 then we have to drop the 15 so we can match\n if reqr is None:\n r = None\n if reqe is None:\n e = None\n if reqv is None: # just for the record if ver is None then we're going to segfault\n v = None\n\n # if we just require foo-version, then foo-version-* will match\n if r is None:\n reqr = None\n\n rc = compareEVR((e, v, r), (reqe, reqv, reqr))\n\n # does not match unless\n if rc >= 1:\n if reqf in ['GT', 'GE', 4, 12, '>', '>=']:\n return 1\n if reqf in ['EQ', 8, '=']:\n if f in ['LE', 10, 'LT', 2,'<=', '<']:\n return 1\n if reqf in ['LE', 'LT', 'EQ', 10, 2, 8, '<=', '<', '=']:\n if f in ['LE', 'LT', 10, 2, '<=', '<']:\n return 1\n\n if rc == 0:\n if reqf in ['GT', 4, '>']:\n if f in ['GT', 'GE', 4, 12, '>', '>=']:\n return 1\n if reqf in ['GE', 12, '>=']:\n if f in ['GT', 'GE', 'EQ', 'LE', 4, 12, 8, 10, '>', '>=', '=', '<=']:\n return 1\n if reqf in ['EQ', 8, '=']:\n if f in ['EQ', 'GE', 'LE', 8, 12, 10, '=', '>=', '<=']:\n return 1\n if reqf in ['LE', 10, '<=']:\n if f in ['EQ', 'LE', 'LT', 'GE', 8, 10, 2, 12, '=', '<=', '<' , '>=']:\n return 1\n if reqf in ['LT', 2, '<']:\n if f in ['LE', 'LT', 10, 2, '<=', '<']:\n return 1\n if rc <= -1:\n if reqf in ['GT', 'GE', 'EQ', 4, 12, 8, '>', '>=', '=']:\n if f in ['GT', 'GE', 4, 12, '>', '>=']:\n return 1\n if reqf in ['LE', 'LT', 10, 2, '<=', '<']:\n return 1\n# if rc >= 1:\n# if reqf in ['GT', 'GE', 4, 12, '>', '>=']:\n# return 1\n# if rc == 0:\n# if reqf in ['GE', 'LE', 'EQ', 8, 10, 12, '>=', '<=', '=']:\n# return 1\n# if rc <= -1:\n# if reqf in ['LT', 'LE', 2, 10, '<', '<=']:\n# return 1\n\n return 0",
"def _get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules",
"def test_VersionWireErrors():\n # Use protocol version 60002 specifically here instead of the latest because\n # the test data is using bytes encoded with that protocol version.\n pver = 60002\n\n # Get a base version, and change the user agent to exceed max limits.\n bvc = baseVersion()\n exceedUAVer = bvc\n newUA = \"/\" + \"t\" * (msgversion.MaxUserAgentLen - 8 + 1) + \":0.0.1/\"\n exceedUAVer.userAgent = newUA\n\n # Encode the new UA length as a varint.\n newUAVarIntBuf = wire.writeVarInt(pver, len(newUA))\n\n # Make a new buffer big enough to hold the base version plus the new\n # bytes for the bigger varint to hold the new size of the user agent\n # and the new user agent string. Then stitch it all together.\n bvEnc = baseVersionEncoded()\n exceedUAVerEncoded = ByteArray()\n exceedUAVerEncoded += bvEnc[0:80]\n exceedUAVerEncoded += newUAVarIntBuf\n exceedUAVerEncoded += newUA.encode()\n exceedUAVerEncoded += bvEnc[97:100]\n\n with pytest.raises(DecredError):\n msgversion.MsgVersion.btcDecode(exceedUAVerEncoded, pver)\n\n bv = baseVersion()\n bv.userAgent = \"t\" * msgversion.MaxUserAgentLen + \"1\"\n\n with pytest.raises(DecredError):\n bv.btcEncode(pver)",
"def valid_version(self, new_version):\n if not re.match(r\"\\d+\\.\\d+\\.\\d+\", new_version):\n return False\n\n x1, y1, z1 = [int(i) for i in self.current_version().split(\".\")]\n x2, y2, z2 = [int(i) for i in new_version.split(\".\")]\n\n if x2 < x1:\n return False\n\n if x2 == x1 and y2 < y1:\n return False\n\n if x2 == x1 and y2 == y1 and z2 <= z1:\n return False\n\n return True",
"def validate_backend_version(self):\n pass",
"def test_load_protocol():\n\n # version 0.0.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,0,0))))\n\n # version 0.1.0 files\n for i in [0]:\n yield load_protocol, (path(__file__).parent /\n path('protocols') /\n path('protocol %d v%s' % (i, Version(0,1,0))))",
"def test_version(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\")\n assert bb.version == \"1.0\"\n\n bb = parse_input(\"name testname\\nversion 1.12\")\n assert bb.version == \"1.12\"",
"def version_check(self):\n # anchor_matcher --> matcher\n if hasattr(self, \"anchor_matcher\"):\n self.matcher = self.anchor_matcher\n if hasattr(self, \"head_in_features\"):\n self.in_features = self.head_in_features\n if hasattr(self, \"test_topk_candidates\"):\n self.topk_candidates = self.test_topk_candidates\n if hasattr(self, \"test_score_thresh\"):\n self.score_threshold = self.test_score_thresh",
"def test_schema_version(self):\n\n self.validator.adata.uns[\"schema_version\"] = \"1.0.0\"\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Schema version '1.0.0' is not supported. Current supported versions: '['2.0.0']'. \"\n \"Validation cannot be performed.\"\n ],\n )",
"def get_version_rules(self, vuln_versions):\n rules = []\n regex_op = \"[0-9a-zA-Z\\\\_\\\\.\\\\-]+\"\n regex_vr = \"[<>=*]+\"\n \"\"\"For all the vulnerable versions information that we get, we need to create\n comparable version object so that we can apply these rules on top of all the available\n versions of a pkg in the market.\"\"\"\n for version in vuln_versions:\n version = version.replace(\" \", \"\")\n version = version.replace(\"+incompatible\", \"\")\n sub_vers = version.split('||')\n for sub_ver in sub_vers:\n tmp = []\n vr_relations = re.split(regex_vr, sub_ver)\n op_relations = re.split(regex_op, sub_ver)\n # Single affected version.\n if len(vr_relations) == 1:\n tmp.append({\n 'key': \"=\",\n 'val': ComparableVersion(vr_relations[0])\n })\n # All versions affected.\n elif len(op_relations) == 1 and op_relations[0] == '*':\n tmp.append({\n 'key': \"*\",\n 'val': \"\"\n })\n else:\n for i in range(len(op_relations) - 1):\n tmp.append({\n 'key': op_relations[i],\n 'val': ComparableVersion(vr_relations[i + 1])\n })\n rules.append(tmp)\n\n return rules",
"def test_VersionWire():\n # verRelayTxFalse and verRelayTxFalseEncoded is a version message as of\n # BIP0037Version with the transaction relay disabled.\n verRelayTxFalse = baseVersionBIP0037()\n verRelayTxFalse.disableRelayTx = True\n verRelayTxFalseEncoded = baseVersionBIP0037Encoded()\n verRelayTxFalseEncoded[-1] = 0\n\n bv = baseVersionBIP0037()\n tests = [\n (bv, bv, baseVersionBIP0037Encoded()),\n (verRelayTxFalse, verRelayTxFalse, verRelayTxFalseEncoded),\n ]\n\n for msgIn, msgOut, msgEnc in tests:\n # Encode the message to wire format.\n b = msgIn.btcEncode(wire.ProtocolVersion)\n assert b == msgEnc\n\n # Decode the message from wire format.\n msg = msgversion.MsgVersion.btcDecode(msgEnc, wire.ProtocolVersion)\n assert sameMsgVersion(msg, msgOut)",
"def check(self, expected):\n versions = ['3.0', '4.0', '5.0', '6.0', '7.0', '8.0']\n modes = ['strict', 'normal', 'ignore']\n\n for version in versions:\n for mode in modes:\n assert self.get(app_version=version, compat_mode=mode) == (\n expected['-'.join([version, mode])])",
"def validate_http_request(request):\r\n if request != b'':\r\n # Divide the request line: [method, sp, url, version, cr lf]\r\n request = request.decode().split('\\r')[0]\r\n method = request.split()[0]\r\n url = request.split()[1]\r\n version = request.split()[2]\r\n if method == METHOD and version == VERSION:\r\n return True, url\r\n else:\r\n return False, None\r\n else:\r\n return True, None",
"def check_capability_negotiation(\n self, environ, start_response, response_headers):\n ua = sa = None\n if \"HTTP_DATASERVICEVERSION\" in environ:\n major, minor, ua = core.parse_dataservice_version(\n environ[\"HTTP_DATASERVICEVERSION\"])\n else:\n major = 2\n minor = 0\n if \"HTTP_MAXDATASERVICEVERSION\" in environ:\n # (unused max_minor)\n max_major, max_minor, sa = core.parse_max_dataservice_version(\n environ[\"HTTP_MAXDATASERVICEVERSION\"]) # noqa\n else:\n max_major = major\n if major > 2 or (major == 2 and minor > 0):\n # we can't cope with this request\n return None\n elif max_major >= 2:\n response_headers.append(\n ('DataServiceVersion', '2.0; pyslet %s' % info.version))\n return 2\n else:\n response_headers.append(\n ('DataServiceVersion', '1.0; pyslet %s' % info.version))\n return 1",
"def compare_version(self, requested_version, op=None, num_parts=3):\n # if we have no specific version, we cannot compare, so assume it's okay\n if not self.specs:\n return True\n\n version = self.specs[0][1]\n op = (op or self.specs[0][0]).strip()\n\n return SimpleVersion.compare_versions(requested_version, op, version)",
"def test_versionComponents(self):\n self.assertEqual(\n (int, int, int),\n tuple(\n type(info) for info\n in [nevow.version.major, nevow.version.minor, nevow.version.micro]))",
"def test_get_protocol_version_name(self):\n server, client = loopback()\n client_protocol_version_name = client.get_protocol_version_name()\n server_protocol_version_name = server.get_protocol_version_name()\n\n assert isinstance(server_protocol_version_name, str)\n assert isinstance(client_protocol_version_name, str)\n\n assert server_protocol_version_name == client_protocol_version_name",
"def check_python_version(match, current=None):\n if current is None:\n current = list(sys.version_info[:3])\n if not isinstance(match, list):\n match = [match]\n for m in match:\n minimal = False\n if isinstance(m, float):\n m = str(m)\n if m.endswith(\"+\"):\n minimal = True\n m = m[:-1]\n # assert m[0].isdigit()\n # assert m[-1].isdigit()\n m = [int(x) for x in m.split(\".\")]\n current_len = current[: len(m)]\n # print(m, current, current_len)\n if minimal:\n if current_len >= m:\n return True\n else:\n if current_len == m:\n return True\n return False",
"async def get_supported_versions(self) -> dict:\n return await self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getSupportedVersions\", API_VERSION),\n filter=attr.filters.include(attr.fields(Body).method),\n ),\n )",
"def compare_versions(fixed_version, target_version):\n for i, j in zip(map(int, fixed_version.split(\".\")), map(int, target_version.split(\".\"))):\n if i == j:\n continue\n return i > j\n return len(fixed_version.split(\".\")) > len(target_version.split(\".\"))",
"def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n main1 = 0\n main2 = 0\n branch1 = 0\n branch2 = 0\n list1 = version1.split('.')\n list2 = version2.split('.')\n for i in range(max(len(list1),len(list2))):\n item1 = 0 if (i > len(list1)-1) else int(list1[i])\n item2 = 0 if (i > len(list2)-1) else int(list2[i])\n if item1 > item2:\n return 1\n if item2 > item1:\n return -1\n return 0",
"def supported_marshaller_api_versions() -> Tuple[str]:\n return (\"1.0\",)",
"def _fetch_api_versions(self):\n log.debug(\"Fetch SASL authentication api versions.\")\n self._broker_connection.request(ApiVersionsRequest())\n response = ApiVersionsResponse(self._broker_connection.response())\n\n self.handshake_version = response.api_versions[SaslHandshakeRequest.API_KEY].max\n self.auth_version = response.api_versions.get(SaslAuthenticateRequest.API_KEY, None)\n\n self.handshake_version = min(self.MAX_HANDSHAKE_VERSION, self.handshake_version)\n if self.auth_version is not None:\n self.auth_version = min(self.auth_version.max, self.MAX_AUTH_VERSION)\n log.debug(\n \"Determinded handshake api version {} and authenticate api version {}\".format(\n self.handshake_version, self.auth_version\n )\n )",
"def check_cal_format_version(version: Optional[Version] = None, current_version: Version = _CAL_FORMAT_VERSION):\n # No version means, the old 1.0 format is used that does not provide a version string\n if not version:\n version = Version(\"1.0.0\")\n if isinstance(version, str):\n version = Version(version)\n\n if version == current_version:\n return\n if version > current_version:\n raise ValueError(\"The provided version, is larger than the currently supported version.\")\n if version < current_version:\n raise ValueError(\n \"The provided calibration format is no longer supported. \"\n \"Check `imucal.legacy` if conversion helper exist.\"\n )",
"def _check_server_version(self, server_version: str) -> None:\n cur_version = parse_version(server_version)\n min_version = parse_version(MIN_SERVER_VERSION)\n if cur_version < min_version:\n raise InvalidServerVersion\n if cur_version != min_version:\n self._logger.warning(\n \"Connected to a Zwave JS Server with an untested version, \\\n you may run into compatibility issues!\"\n )",
"def test_valid_versions(self):\n instance = ClassWithVersion()\n versions = [\"1.2.3\", \"1.2.*\", \"1.*\", \"*\", \"1.1.1\", \"1.0.1rc1\"]\n for version in versions:\n instance.version = version\n self.assertEqual(instance.version(), version)",
"def test_SSLeay_version(self):\n versions = {}\n for t in [\n SSLEAY_VERSION,\n SSLEAY_CFLAGS,\n SSLEAY_BUILT_ON,\n SSLEAY_PLATFORM,\n SSLEAY_DIR,\n ]:\n version = SSLeay_version(t)\n versions[version] = t\n assert isinstance(version, bytes)\n assert len(versions) == 5",
"def parse_protocol_header(stream: BytesIO) -> Tuple[int, int, int]:\n prefix, *version = unpack('>5sBBB', _read(stream, 8))\n if prefix != b'AMQP\\x00':\n raise ValueError(\"wrong protocol, expected b'AMQP\\x00', got {}\".format(\n prefix\n ))\n return version",
"def test_url_line_parser_vanilla(self):\n line = \"GET / HTTP/1.1\"\n method, url, params, version = parser._parse_url_line(line, endpoint)\n self.assertEqual(\"GET\", method)\n self.assertEqual(\"http://test.com/\", url)\n self.assertEqual({}, params)\n self.assertEqual(\"HTTP/1.1\", version)",
"def test_e(self):\n v1 = versions.Version(version='1.2.1', name='foo')\n\n self.assertTrue(v1 < '3.4.5.6')",
"def checkurlsbasic(urlstxt, protocolstxt, mdtype):\n # urlstxt consists of a list of urls, split by ;\n # for each url, check if it has been resolvable by inspecting the HTTP\n # code added to the URL\n score = 2\n if len(urlstxt) > 3: # TODO: take into account that starts with \" (HTTP \" as well, so longer url is needed\n # start with an empty score if there are urls\n score = 0\n urls = urlstxt.split(valuesep) # TODO: make configurable?\n errors = 0\n nrurls = 0\n for u in urls:\n if u != None:\n try:\n # u starts with \" (HTTP 0)\", so must be longer than\n if len(u) > 2 and u != \" (HTTP 0)\":\n # TODO: assume http 2xx, 3xx and 5xx-series (all!) are the only okay HTTP codes?\n # Or use others as well? Or just the start mubers, so 204 is also included\n # for 400-series: HTTP 400 should be okay, since this means that the client sent a wrong request (but the service still exists / works there, e.g. TMS)\n # TODO: use a list of HTTP codes that are okay.\n # Configurable\n nrurls = nrurls + 1\n if u.find(\"(HTTP 2\") == -1 and u.find(\"(HTTP 3\") == -1 and u.find(\"(HTTP 5\") == -1 and u.find(\"(HTTP 400\") == -1 and u.find(\"(HTTP 403\") == -1 and u.find(\"(HTTP intern)\") == -1:\n errors = errors + 1\n except Exception as e:\n logging.info(\"Checking URLs failed for: \" + u)\n logging.debug(str(e))\n errors = errors + 1\n if mdtype == \"dataset\" or mdtype == \"series\":\n if errors == 0:\n score = 2\n # if we don't have urls, but the service type contains OGC: .. , download or website, then we have an error\n # TODO: document\n if (protocolstxt.find(\"OGC:\") > -1 or protocolstxt.find(\"download\") > -1 or protocolstxt.find(\"website\") > -1) and nrurls == 0:\n score = 0\n # checkid = 6, so the index in the matrix is: 5\n result = checksdatasets[5][2][score]\n else:\n # there must be a URL as well, so check this\n if errors > 0 or nrurls == 0:\n score = 0\n else:\n score = 2\n result = checksservices[5][2][score]\n return MkmScore(urlstxt, score, result)",
"def cmp_version(v0, v1):\n\n # Convert both version strings to arrays of ints\n try: \n a0 = [int(f) for f in v0.split('.')]\n a1 = [int(f) for f in v1.split('.')]\n except ValueError as error:\n logger.error(\"invalid version comparison: {} <> {}\".format(v0, v1))\n raise error\n\n # Make sure both arrays have the same length\n while len(a0) < len(a1): a0.append(0)\n while len(a1) < len(a0): a1.append(0)\n \n # Make an array of tuples for comparison\n t = zip(a0, a1)\n\n # the first pair that aren't equal determine the comparison value\n for f in t:\n if f[0] != f[1]:\n return cmp(f[0], f[1])\n\n # They're all equal: the versions are equal\n return 0",
"def compare_versions(version1, version2, format=False):\n v1 = version1 if not format else format_stack_version(version1)\n v2 = version2 if not format else format_stack_version(version2)\n\n max_segments = max(len(v1.split(\".\")), len(v2.split(\".\")))\n return cmp(_normalize(v1, desired_segments=max_segments), _normalize(v2, desired_segments=max_segments))",
"def versionCompare(v1, v2):\n v1t = tuple(map(int, (v1.split(\".\"))))\n v2t = tuple(map(int, (v2.split(\".\"))))\n return v1t > v2t",
"def protocol_version_9():\n print('Setting protocol version to 9')\n upgrade('protocolversion', 'protocol_version', 9)",
"def compare_versions(cls, version_a, op, version_b, ignore_sub_versions=True):\n\n if not version_b:\n return True\n num_parts = 3\n\n if op == '~=':\n num_parts = max(num_parts, 2)\n op = '=='\n ignore_sub_versions = True\n elif op == '===':\n op = '=='\n\n try:\n version_a_key = cls._get_match_key(cls._regex.search(version_a), num_parts, ignore_sub_versions)\n version_b_key = cls._get_match_key(cls._regex.search(version_b), num_parts, ignore_sub_versions)\n except:\n # revert to string based\n for v in cls._sub_versions_pep440:\n version_a = version_a.replace(v, '.')\n version_b = version_b.replace(v, '.')\n\n version_a = (version_a.strip('.').split('.') + ['0'] * num_parts)[:num_parts]\n version_b = (version_b.strip('.').split('.') + ['0'] * num_parts)[:num_parts]\n version_a_key = ''\n version_b_key = ''\n for i in range(num_parts):\n pad = '{:0>%d}.' % max([9, 1 + len(version_a[i]), 1 + len(version_b[i])])\n version_a_key += pad.format(version_a[i])\n version_b_key += pad.format(version_b[i])\n\n if op == '==':\n return version_a_key == version_b_key\n if op == '<=':\n return version_a_key <= version_b_key\n if op == '>=':\n return version_a_key >= version_b_key\n if op == '>':\n return version_a_key > version_b_key\n if op == '<':\n return version_a_key < version_b_key\n raise ValueError('Unrecognized comparison operator [{}]'.format(op))",
"def set_version(self, protocol_version):\n self.version = protocol_version\n self.version_bytes = str(protocol_version).encode(\"latin1\")\n self.version_header = self.version_bytes + PROTOCOL_3x_HEADER\n if protocol_version == 3.2: # 3.2 behaves like 3.3 with type_0d\n # self.version = 3.3\n self.dev_type = \"type_0d\"\n elif protocol_version == 3.4:\n self.dev_type = \"v3.4\"",
"def compare_versions(version1, version2):\n version1 = coerce_version(version1)\n version2 = coerce_version(version2)\n return compare_version_objects(version1, version2)",
"def get_supported_versions(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getSupportedVersions\", API_VERSION),\n filter=attr.filters.include(attr.fields(Body).method),\n ),\n )",
"def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True",
"def test_parse_version(self):\n version = VersionNumberScaleMeasurement.parse_version(None)\n self.assertEqual(Version(\"0\"), version)",
"def test_parse_version():\n version = parse_version(__version__)\n assert type(version) == Version",
"def compareVersion(self, version1, version2):\n v1 = version1.split('.')\n v2 = version2.split('.')\n\n for x, y in zip(v1, v2):\n if int(x) > int(y):\n return 1\n elif int(x) < int(y):\n return -1\n\n # all prefixes are equal\n if len(v1) > len(v2):\n for num in v1[len(v2):]:\n if int(num) > 0:\n return 1\n elif len(v1) < len(v2):\n for num in v2[len(v1):]:\n if int(num) > 0:\n return -1\n return 0",
"def test_version():\n versions = ((2, 7, 16), (3, 5, 7), (3, 6, 8), (3, 7, 3))\n assert sys.version_info[:3] in versions",
"def test_pynast_suported_version(self):\r\n min_acceptable_version = (1, 2)\r\n max_acceptable_version = (1, 2, 2)\r\n try:\r\n from pynast import __version__ as pynast_lib_version\r\n version = pynast_lib_version.split('.')\r\n if version[-1][-4:] == '-dev':\r\n version[-1] = version[-1][:-4]\r\n version = tuple(map(int, version))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(pynast_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n\r\n min_version_str = '.'.join(map(str, min_acceptable_version))\r\n max_version_str = '.'.join(map(str, max_acceptable_version))\r\n error_msg = (\"Unsupported pynast version. Must be >= %s and <= %s, \"\r\n \"but running %s.\" % (min_version_str, max_version_str,\r\n version_string))\r\n self.assertTrue(pass_test, error_msg)",
"def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False",
"def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None",
"def test_python_supported_version(self):\r\n min_acceptable_version = (2, 7, 0)\r\n min_unacceptable_version = (3, 0, 0)\r\n\r\n command = 'python --version'\r\n proc = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT)\r\n stdout = proc.stdout.read()\r\n\r\n version_str_matches = re.findall('Python\\s+(\\S+)\\s*', stdout.strip())\r\n self.assertEqual(len(version_str_matches), 1,\r\n \"Could not determine the Python version in '%s'.\" %\r\n stdout)\r\n version_string = version_str_matches[0]\r\n\r\n try:\r\n if version_string[-1] == '+':\r\n version_string = version_string[:-1]\r\n version = tuple(map(int, version_string.split('.')))\r\n if len(version) == 2:\r\n version = (version[0], version[1], 0)\r\n pass_test = (version >= min_acceptable_version and\r\n version < min_unacceptable_version)\r\n except ValueError:\r\n pass_test = False\r\n version_string = stdout\r\n self.assertTrue(pass_test,\r\n \"Unsupported Python version. Must be >= %s and < %s, \"\r\n \"but running %s.\"\r\n % ('.'.join(map(str, min_acceptable_version)),\r\n '.'.join(map(str, min_unacceptable_version)),\r\n version_string))",
"def is_valid_version(self):\n pass",
"def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())",
"def convert(self, value, param, ctx):\n converted_ver = Version(value)\n if converted_ver < Version('2') or converted_ver >= Version('4'):\n self.fail(\n \"Pulp Smash can test Pulp version 2.y and 3.y. It can't test \"\n 'Pulp version {}.'.format(converted_ver),\n param,\n ctx\n )\n return converted_ver",
"def version_check(version):\n return {\n 1: 'OF10', # 0x01 -> OF1.0\n 3: 'OF12', # 0x03 -> OF1.2\n 4: 'OF13', # 0x04 -> OF1.3\n 5: 'OF14', # 0x05 -> OF1.4\n 6: 'OF15', # 0x06 -> OF1.5\n }.get(version, 0)",
"def _version_support_check(self, v_maps, **kwargs):\n if self.session._invalid_server_version():\n # server version is not valid, force a refresh right now\n self.session.get_server_version(**kwargs)\n\n if self.session._invalid_server_version():\n # server version is STILL invalid, return False\n return False\n\n for v_map in v_maps:\n if not self.session.server_version >= v_map:\n return False\n return True",
"def test_compare_local_version_is_same(self):\n\n given = \"1.0.0.dev (Hello, World)\"\n expected = None\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)",
"def os_is_compatible(required_os_version: str) -> bool:\n\tcurrent_version = [int(c) for c in os_release().split('.')]\n\trequired_version = [int(c) for c in required_os_version.split('.')]\n\n\t# 10.13.6.2 is not (necessarily) compatible with 10.13.6\n\tif len(required_version) > len(current_version) and\\\n\t required_version[0:len(current_version)] == current_version:\n\t return False\n\n\t# Compare versions component-wise\n\tfor (c, r) in zip(current_version, required_version):\n\t\tif c < r:\n\t\t\treturn False\n\n\treturn True",
"def version_match(required, candidate):\n return _discover.version_match(required, candidate)",
"def test_protocols_updated(self):\n assert self.connection_config.protocols == {self.new_protocol_id}",
"def python_version_check():\n min_version_list = PYTHON_MIN_VERSION.split(\".\")\n # Truncate if the list is more the 4 items\n if len(min_version_list) > 4:\n min_version_list = min_version_list[:4]\n # Fill if the list is less then 4 items\n if len(min_version_list) == 1:\n min_version_list.append(\"0\")\n if len(min_version_list) == 2:\n min_version_list.append(\"0\")\n if len(min_version_list) == 3:\n min_version_list.append(\"f0\")\n # Calculate the minimum version and an integer, which, when displayed as\n # hex, is easily recognised as the version. E.g. 0x30502f0 is 3.5.2\n min_version_value = 0\n for index, item in enumerate(min_version_list[::-1]):\n min_version_value = min_version_value + int(item, 16) * 2**(index * 8)\n if debug: print(\"Python Version Minimum:{}, Decimal:{}, Hex:{}\"\n .format(PYTHON_MIN_VERSION, min_version_value,\n hex(min_version_value)))\n # test value and exit if below minimum revision\n if sys.hexversion < min_version_value:\n print(\"Python Version: {}. Required minimum version is: {}. Exiting...\"\n .format(sys.version.split(\" \")[0], PYTHON_MIN_VERSION))\n sys.exit()",
"def compare_ver(a, b):\n if a == b:\n return 0\n\n ap = [int(p) for p in a.split(\".\")]\n bp = [int(p) for p in b.split(\".\")]\n lap = len(ap)\n lbp = len(bp)\n\n # min # of pieces\n mp = lap\n if lbp < mp:\n mp = lbp\n\n for i in range(mp):\n if ap[i] < bp[i]:\n return -1\n if ap[i] > bp[i]:\n return 1\n\n if lap > lbp:\n # a has more pieces, common pieces are the same, a is greater\n return 1\n\n if lap < lbp:\n # a has fewer pieces, common pieces are the same, b is greater\n return -1\n\n # They are exactly the same.\n return 0",
"def checkVersion(self, clientName, edamVersionMajor, edamVersionMinor):\r\n pass",
"def test_parse_invalid_version(self):\n version = VersionNumberScaleMeasurement.parse_version(\"This is not a version number\")\n self.assertEqual(Version(\"0\"), version)",
"def validate_tls_min_version(self, node=None, version=\"1.2\", expect=\"fail\"):\n if node is None:\n node = self.cluster.master\n cmd = self.curl_path + \" -v --tlsv\" + version + \" --tls-max \" + version + \\\n \" -u \" + node.rest_username + \":\" + node.rest_password + \\\n \" https://\" + node.ip + \":18091/pools/ -k\"\n shell = RemoteMachineShellConnection(node)\n o, e = shell.execute_command(cmd)\n if expect == \"fail\":\n if len(o) != 0:\n shell.disconnect()\n self.fail(\"Command worked when it should have failed\")\n else:\n if len(o) == 0 or \"pools\" not in o[0]:\n shell.disconnect()\n self.fail(\"Command failed when it should have worked\")\n shell.disconnect()",
"def compare_versions(v1: str, v2: str):\n # Convert strings to lists of integers\n v1 = [int(num) for num in v1.split('.')]\n v2 = [int(num) for num in v2.split('.')]\n # Pad shortest list by zeros at the end\n l1 = len(v1)\n l2 = len(v2)\n if l1 > l2:\n for i in range(l1 - l2):\n v2.append(0)\n elif l2 > l1:\n for i in range(l2 - l1):\n v1.append(0)\n # Compare versions: as soon as numbers at the same indexes\n # are unequal, return result.\n for i in range(len(v1)):\n if v1[i] > v2[i]:\n return 1\n if v2[i] > v1[i]:\n return -1\n # If all numbers at the same indexes are equal\n return 0",
"def test_new_style_with_version(self):\n self.assertIsNotNone(parse_arxiv_id('1202.1234v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v1'))\n self.assertIsNotNone(parse_arxiv_id('1203.12345v12'))",
"def compare_two_version_strings(version1, version2):\n _version1 = version1.split('.')\n _version2 = version2.split('.')\n version1_len = len(_version1)\n version2_len = len(_version2)\n upper_bound = version1_len if version1_len < version2_len else version2_len\n for i in range(0, upper_bound):\n if _version1[i] > _version2[i]:\n return version2,version1\n\n # Check to make sure that the case where the pair('1.0.0', '1.0')\n # should be ordered as ('1.0', '1.0.0')\n if version1_len > version2_len and _version1[upper_bound-1] == _version2[upper_bound-1]:\n return version2,version1\n return version1,version2",
"def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass",
"def test_lowest_version(self):\n self.assertEqual({\"python-xyz\": \"1\",\n \"python-foo\": \"3.1\"},\n pr.sanitize_requirements(\n [\"xyz>=1,>=2\", \"foo>=4,>=3.1\"]))",
"def test_validate_rdp_version(self):\r\n rdp22_fp = \"/Applications/rdp_classifier_2.2/rdp_classifier-2.2.jar\"\r\n self.assertEqual(validate_rdp_version(rdp22_fp), 2.2)\r\n\r\n invalid_fp = \"/Applications/rdp_classifier_2.2/rdp_classifier.jar\"\r\n self.assertRaises(RuntimeError, validate_rdp_version, invalid_fp)\r\n\r\n rdp20_fp = \"/Applications/rdp_classifier_2.2/rdp_classifier-2.0.jar\"\r\n self.assertRaises(RuntimeError, validate_rdp_version, rdp20_fp)",
"def compare_version(v1, v2):\n s1 = StrictVersion(v1)\n s2 = StrictVersion(v2)\n if s1 == s2:\n return 0\n elif s1 > s2:\n return -1\n else:\n return 1",
"def compare_version(v1, v2):\n s1 = StrictVersion(v1)\n s2 = StrictVersion(v2)\n if s1 == s2:\n return 0\n elif s1 > s2:\n return -1\n else:\n return 1",
"def check_versioning(ctx, stmt):\n\n # Don't perform this check for modules that are not OpenConfig\n # or are OpenConfig infrastructure (e.g., extensions)\n if (OCLintFunctions.is_openconfig_validatable_module(stmt.arg) in\n [ModuleType.NONOC, ModuleType.OCINFRA]):\n return\n\n version = None\n for substmt in stmt.substmts:\n # pyang uses a keyword tuple when the element is from\n # an external extension rather than a built-in, check for\n # this before checking the argument. Assumption is made\n # that openconfig-version is unique across all extension\n # modules.\n if (isinstance(substmt.keyword, tuple) and\n substmt.keyword[1] == \"openconfig-version\"):\n version = substmt\n\n if version is None:\n err_add(ctx.errors, stmt.pos, \"OC_MODULE_MISSING_VERSION\",\n stmt.arg)\n return\n\n if not re.match(r\"^[0-9]+\\.[0-9]+\\.[0-9]+$\", version.arg):\n err_add(ctx.errors, stmt.pos, \"OC_INVALID_SEMVER\",\n version.arg)\n\n # Check that there\n match_revision = False\n for revision_stmt in stmt.search(\"revision\"):\n reference_stmt = revision_stmt.search_one(\"reference\")\n if reference_stmt is not None and reference_stmt.arg == version.arg:\n match_revision = True\n\n if match_revision is False:\n err_add(ctx.errors, stmt.pos, \"OC_MISSING_SEMVER_REVISION\",\n version.arg)",
"def test_split_version(self):\n\n given = \"1.0.0.dev (Hello, World!)\"\n expected = [\"1\", \"0\", \"0\"]\n actual = Version.split_versions(given)\n\n self.assertEqual(expected, actual)",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def test_tls_client_minimum_set(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")\n\n # Also test a string version\n config = {\"federation_client_minimum_tls_version\": \"1\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": \"1.2\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")",
"def test_compare_local_version_is_older(self):\n\n given = \"2.34.0.dev (Hello, World)\"\n expected = True\n actual = Version.compare(given)\n\n self.assertEqual(expected, actual)",
"def is_up_to_date(self, server_version):\r\n client_split = self.__version__.split('.')\r\n client_len = len(client_split)\r\n server_split = server_version.split('.')\r\n server_len = len(server_split)\r\n\r\n # Make both lists the same length\r\n for i in range(client_len, server_len):\r\n client_split.append('0')\r\n for i in range(server_len, client_len):\r\n server_split.append('0')\r\n\r\n for i in range(0, client_len):\r\n if 'b' in client_split[i]:\r\n # Using a beta version, don't check\r\n return True\r\n client = int(client_split[i])\r\n server = int(server_split[i])\r\n if client < server:\r\n return False\r\n elif server < client:\r\n return True\r\n\r\n return True",
"def test_multi_line():\n\n for protocol in LEGACY_PROTOCOLS:\n p = protocol([])\n\n test_case = [\n \"48 6B 10 49 02 01 00 01 02 03 FF\",\n \"48 6B 10 49 02 02 04 05 06 07 FF\",\n \"48 6B 10 49 02 03 08 09 0A 0B FF\",\n ]\n\n correct_data = [0x49, 0x02] + list(range(12))\n\n # in-order\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)\n\n # test a few out-of-order cases\n for n in range(4):\n random.shuffle(test_case) # mix up the frame strings\n r = p(test_case)\n assert len(r) == 1\n check_message(r[0], len(test_case), 0x10, correct_data)",
"def check_from_version(version: str) -> str:\n version_int = [int(v) for v in version.split(\".\")]\n if version_int[0] not in PipetteModelMajorVersion:\n raise ValueError(f\"Major version {version_int[0]} is not supported.\")\n if version_int[1] not in PipetteModelMinorVersion:\n raise ValueError(f\"Minor version {version_int[1]} is not supported.\")\n return version",
"def solr_version_check(core):\n expected_version = SCHEMA[core].version\n solr_uri = config.CFG.get(\"solr\", \"uri\")\n u = urllib2.urlopen(\"%s/%s/schema/version\" % (solr_uri, core))\n content = loads(u.read())\n seen_version = content[\"version\"]\n if not seen_version == expected_version:\n raise VersionMismatchException(core, expected_version, seen_version)\n logger.debug(\"%s: version %1.1f matches %1.1f\", core, expected_version,\n seen_version)",
"def _negotiate_protocols(self, protocols, direction):\n uris = [p.uri for p in protocols]\n if direction in ['pushFromVoSpace', 'pullToVoSpace']:\n supported = list(set(uris) & set(CLIENT_PROTOCOLS))\n else:\n supported = list(set(uris) & set(SERVER_PROTOCOLS))\n if len(supported) == 0: raise VOSpaceError(500, \"The service supports none of the requested Protocols\", summary = PROTOCOL_NOT_SUPPORTED)\n selected = [p for p in protocols if p.uri in supported]\n if direction in ['pullFromVoSpace', 'pushToVoSpace']:\n for protocol in selected:\n protocol.set_endpoint(SERVER_PROTOCOLS[protocol.uri].get_endpoint())\n return selected",
"def test_valid_hh_version():\n # TODO: Basically only enforcing correct main segment, since not using `re.fullmatch`\n # TODO: Probably want `re.fullmatch` here - Currently ignoring any potentially invalid suffix\n version_pattern = r\"^[0-9]+\\.[0-9]+\\.[0-9]+(|a[0-9]|b[0-9]|rc[0-9])\"\n res = re.match(version_pattern, hh.__version__)\n assert res is not None"
] |
[
"0.7269251",
"0.6852072",
"0.68450886",
"0.6574904",
"0.645231",
"0.6396166",
"0.63311136",
"0.6159254",
"0.61516625",
"0.6143125",
"0.6079563",
"0.6043297",
"0.6020891",
"0.5971723",
"0.5925532",
"0.5903397",
"0.58636093",
"0.57814234",
"0.57633996",
"0.5755667",
"0.57490414",
"0.57051283",
"0.5693326",
"0.56907916",
"0.568826",
"0.5685663",
"0.5668759",
"0.56587327",
"0.5655409",
"0.5632572",
"0.56203556",
"0.56168073",
"0.5569955",
"0.55616605",
"0.55351454",
"0.55328625",
"0.5522015",
"0.55099344",
"0.55090547",
"0.55064",
"0.5485873",
"0.54784125",
"0.5475577",
"0.5474718",
"0.5469881",
"0.54679424",
"0.5457446",
"0.54554564",
"0.5451173",
"0.5449266",
"0.5439971",
"0.5438076",
"0.5437458",
"0.5432613",
"0.5416337",
"0.5415012",
"0.5403447",
"0.54029256",
"0.53963006",
"0.5394196",
"0.53903574",
"0.53879166",
"0.5380085",
"0.5372452",
"0.5371388",
"0.5371346",
"0.5364997",
"0.5363955",
"0.53594106",
"0.5354558",
"0.53317237",
"0.5327242",
"0.5322621",
"0.5316069",
"0.53145534",
"0.53104144",
"0.53003436",
"0.5299686",
"0.52896833",
"0.5289087",
"0.52887255",
"0.5286276",
"0.52832395",
"0.5271612",
"0.5261728",
"0.52596194",
"0.5252961",
"0.52490413",
"0.52490413",
"0.5247224",
"0.5243185",
"0.5235301",
"0.5227461",
"0.5226897",
"0.52263814",
"0.52248484",
"0.5217926",
"0.5215432",
"0.52038825",
"0.51968235"
] |
0.7876382
|
0
|
If a client_id is provided together with the inform message, we now have enough information to get the data from the database.
|
def handle_client_id(self, session):
if session['client']['cid'] is not None:
# A subscriber ID may only contain letters, numbers, spaces and
# the following special characters: - _ \ / ( ) # .
p = re.compile('^[A-Za-z0-9-_\\\. #/()]+$')
if p.match(session['client']['cid']) is None:
raise ClientRequestError('InvalidClientId')
        # Keep the requested cid, since the lookup below overwrites session['client'].
        cid = session['client']['cid']
        try:
            session['client'] = session['db'].client_data_query(cid)
        except DbException:
            session['log'] = {'rc': 'error', 'msg': 'Non matching ClientID'}
            raise ClientRequestError('UnknownClient', data=cid + ' does not match data in database')
        if session['client'] is None:
            # The client could not be found.
            # It means that the client is not yet defined in the database.
            LOG.info("Client not in database, cid:" + cid)
            session['log'] = {'rc': 'ok', 'msg': 'Unknown CLIENT '}
            raise ClientRequestError('UnknownClient', data='No entry for client in database')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle_inform(self, session, request):\n # Verify the parameters\n params = get_element('params', request)\n # Default value for protocol_compression\n protocol_compression = 'NONE'\n response = {}\n\n if params is not None:\n try:\n # Fetch inform parameters and load into session\n self.fetch_inform_params(session, params)\n # handle a possible subscriber id (MACless communication)\n self.compare_protocol_versions(session)\n # If protocol_compression method is provided, check if valid\n self.handle_protocol_compression(session)\n # Validate and check reason (event) for this session\n self.handle_client_id(session)\n # Parse provided protocol version parameters and check validity\n self.handle_connection_event(session)\n # Check for unknown parameters provided in RPC\n for key in params:\n if key not in RPCS.VALID_INFORM_PARAMETERS:\n raise ClientRequestError(\"InvalidParameterName\", data=key)\n\n except ClientRequestError as inform_error:\n next_state = inform_error.error['next_state']\n error_message = {\"error\": inform_error.error['error']}\n\n if inform_error.error_name == \"InvalidClientId\":\n # As per defined in the protocol: Log in database\n session['log'] = {'rc': 'error', 'msg': 'Invalid client_id value'}\n\n LOG.debug(\"ExpectInform Error: \" + str(inform_error))\n return next_state, error_message\n except DbException:\n return (RPCS.ExpectInform, {\n 'error': {'code': -31997,\n 'message': 'Database access error'}})\n\n # Everything is OK with Inform RPC\n next_state = RPCS.ExpectRpc\n response['result'] = {\n 'protocol_version': self.protocol_version,\n 'protocol_compression': protocol_compression\n }\n\n # No parameters provided with inform RPC\n else:\n next_state = RPCS.ExpectInform\n response['error'] = {\n 'code': -32602, 'message': 'Invalid parameter'}\n\n return next_state, response",
"def detail(client_id):\n try:\n # Fetch client details from the BancBox api and render\n clientId = { 'bancBoxId': client_id }\n request_params = {'subscriberId': subscriber_id, 'clientId': clientId}\n results = api.service.getClient(request_params) \n client = results.client\n except Exception, e:\n logger.error('Error retrieving client [%s]: %s', client_id, e)\n client = {}\n return render_template('detail.html', client=client)",
"def on_report_to_master(client_id, data):",
"def get_client_data(self, client_id):\n query = \"\"\"SELECT id,\n secret\n FROM clients\n WHERE active = 1\n AND id = %s\"\"\"\n self._execute(query, (client_id,))\n return self._dictfetchone()",
"def from_client(self, data):\r\n pass",
"def request_client_id(self) -> None:\n GCR.log.log(Logger.INFORMATION, \"Demande d'un id client\")\n self.send({\"action\": \"request_id\", \"username\": self.username})",
"def received_information(update: Update, context: CallbackContext) -> int:\n text = update.message.text\n for a in user_d:\n category = user_d[a]\n if category == 'Public_Key' and len(text) == 58:\n assert len(text) == 58, update.message.reply_text(\"The address is invalid address\")\n user_d[category] = text\n elif category == 'Quantity' and type(int(text) == int):\n user_d[category] = int(text)\n elif category == 'Secret_Key' and len(text) > 58:\n user_d[category] = text\n else:\n user_d[category] = text\n user_data = context.user_data\n user_data[category] = user_d[category]\n\n update.message.reply_text(\n \"I got this from you:\\n\"\n f\"{facts_to_str(user_d)}\",\n reply_markup=markup_r,\n )\n user_d.clear()\n\n return CHOOSING",
"def create_client_request(self):\n try:\n query_to_add_client_request = \"INSERT INTO client_request(produce_name, quantity, price_range, users_id, date_created,date_modified) VALUES(%s,%s,%s,%s,%s,%s)\"\n connection.cursor.execute(query_to_add_client_request,( self.produce_name,self.quantity, self.price_range, self.current_user, self.date_created, self.date_modified))\n query_to_search_client_request = \"SELECT * FROM client_request WHERE produce_name=%s\"\n connection.cursor.execute(query_to_search_client_request, [self.produce_name])\n added_produce = connection.cursor.fetchone()\n result = {\n 'id': added_produce[0],\n 'produce_name': added_produce[1],\n 'quantity':added_produce[2],\n 'price_range': added_produce[3],\n }\n\n return result\n \n except Exception as exc:\n print(exc)",
"def recieve_information_from_client():\r\n client_data = request.forms.get('json')\r\n client_data_dict = json.loads(client_data)\r\n return client_data_dict",
"def get_info(self,honeypotids):\n req = {\"type\":\"get_info\",\n \"from\":self.network.mc_id,\n \"to\": honeypotids}\n expect_dict = {\"type\":\"send_info\"}\n msg_list = self.send_receive(req,honeypotids,expect_dict)\n answer = {}\n for msg in msg_list:\n answer[msg[\"from\"]] = msg[\"info\"]\n return answer",
"def set_client_id(self):\n data = self.receive() # deserialized data\n client_id = data['clientid'] # extracts client id from data\n self.client_id = client_id # sets the client id to this client\n print(\"Successfully connected to server: \" + self.userInfo['host'] + \" / \" + str(self.userInfo['port']))\n print(\"Your client info is:\\n\" + \"Client Name: \" + self.userInfo['name'] + \"\\nClient ID: \" + str(client_id))",
"def change_client_info(request: Request) -> Dict:\n ser = ChangeClientInfoSerializer(data=request.data)\n if ser.is_valid():\n if ser.validated_data.get('email') and request.user.client.email != ser.validated_data['email']:\n request.user.client.activated = False\n new_email = UserEmail(template_id=1, user=request.user)\n new_email.generate_code()\n is_send = send_email_to_user(1, [request.user.client.email], f'https://royal-lion.bet/activate/{new_email.code}')\n if is_send:\n new_email.save()\n request.user.client.save()\n ser.update(request.user.client, validated_data=ser.validated_data)\n return {'data': 'ok', 'success': True}\n else:\n return {'errors': ser.errors, 'success': False}",
"def client_details(self, value):\n self._client_details = value",
"def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))",
"def datapack_details(request):\n print 'get datapack details'\n\n context = request['context']\n print context\n try:\n telephone_number = first_entity_value(request['entities'], 'phone_number')\n with open(os.path.join(sys.path[0], \"app/wit/static/users.json\"), \"r\") as data_file:\n data = json.load(data_file)\n network_details = data[telephone_number]['data_details']\n print network_details\n\n\n\n reply = \"Our Initial Investigation shows that you're are currently using \" + network_details['network_services_available'] + \" and have subscribed for \" + network_details['network_services_subscribed'] + \".\"\n if network_details['megabytes_available'] == 0:\n reply += \" You have exhausted your datapack. Change your network settings to use pay2go plan or recharge now with available datapacks. Please check http://www.airtel.in/Airtel3G/tariff.html\"\n elif network_details['network_services_available'] != network_details['network_services_subscribed']:\n reply += \" Your subscribed datapack settings does not match with services available. Please change your network settings\"\n\n except:\n telephone_number = None\n reply = \"Your number is not subscribed with Airtel. Please contact your network operator for your query\"\n\n context['datapack'] = reply\n\n return context",
"def getInfo(notification):",
"def __ServiceClient(self,Client):\n\t\twhile True:\n\t\t\tDataClient = Client.recv(1024)\n\t\t\tprint(DataClient)\n\t\t\t# your source code here\n\t\t\tmessage = DataClient\n\t\t\t# data to be sent to api\n\t\t\tdata = {'message': message}\n\t\t\t# sending post request and saving response as response object\n\t\t\tr = requests.post(url = self.API_ENDPOINT, data = data)\n\t\t\t# extracting response text\n\t\t\t#pastebin_url = r.text\n\t\t\t#print(\"The pastebin URL is:%s\"%pastebin_url)",
"async def execute(self):\n payload = self.integration_client.dict()\n\n await self.rethink_handler.single_insert_update(\n db='mesxpert_view',\n table='integration_clients',\n payload=payload\n )",
"def __set_client_detail(self):\r\n ClientDetail = self.client.factory.create('ClientDetail')\r\n ClientDetail.AccountNumber = self.config_obj.account_number\r\n ClientDetail.MeterNumber = self.config_obj.meter_number\r\n ClientDetail.IntegratorId = self.config_obj.integrator_id\r\n if hasattr(ClientDetail, 'Region'):\r\n ClientDetail.Region = self.config_obj.express_region_code\r\n self.ClientDetail = ClientDetail",
"def choose(self, _id):\n app = App.get_running_app()\n self.manager.client = app.session.query(Client).filter(Client.id == _id).one()\n self.manager.current = 'info'",
"def client_message_handler(self, message, client):\n LOG.debug(f\"Разбираем сообщение: {message}\")\n if (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_PRESENCE\n and s.KEY_TIME in message\n and s.KEY_USER in message\n ):\n if message[s.KEY_USER][s.KEY_ACCOUNT_NAME] not in self.names.keys():\n self.names[message[s.KEY_USER][s.KEY_ACCOUNT_NAME]] = client\n MSG.send(client, s.RESPONSE_200)\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Имя пользователя уже занято.\"\n MSG.send(client, response)\n self.clients.remove(client)\n client.close()\n return\n # Если это сообщение, то добавляем его в очередь сообщений.\n # Ответ не требуется.\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_MESSAGE\n and s.KEY_TIME in message\n and s.KEY_TO in message\n and s.KEY_FROM in message\n and s.KEY_MESSAGE in message\n ):\n self.messages.append(message)\n return\n # Если клиент выходит\n elif (\n s.KEY_ACTION in message\n and message[s.KEY_ACTION] == s.ACTION_EXIT\n and s.KEY_ACCOUNT_NAME in message\n ):\n self.clients.remove(self.names[message[s.KEY_ACCOUNT_NAME]])\n self.names[message[s.KEY_ACCOUNT_NAME]].close()\n del self.names[message[s.KEY_ACCOUNT_NAME]]\n return\n # Иначе отдаём Bad request\n else:\n response = s.RESPONSE_400\n response[s.KEY_ERROR] = \"Запрос не корректен\"\n MSG.send(client, response)\n return",
"def test_context_data_info_message_for_one_result(self):\n factories.SourceDatasetFactory.create(\n i_dbgap_description='lorem ipsum',\n source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')",
"async def tell(client, data):\n conn = client.bot.dbs[data.server]\n split = data.split_message\n\n tables = db.get_table_names(conn)\n if 'tells' not in tables:\n asyncio.create_task(client.message(data.target, 'Tell table uninitialized, ask your nearest bot admin to restart the bot.'))\n\n if len(split) > 1:\n recipient = split[0]\n recipient = recipient.lower()\n message = ' '.join(split[1:])\n else:\n return\n \n telldata = (recipient, data.nickname, message, int(time.time()), '0', '0')\n db.set_row(conn, 'tells', telldata)\n db.ccache()\n\n asyncio.create_task(client.notice(data.nickname, 'Your message will be sent.'))",
"def test_context_data_info_message_for_one_result(self):\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '1 result found.')",
"def contains_id(self, message_id, customer_id):\r\n \r\n #id_str = f\"SELECT count(*) FROM auditlog where customerID={customer_id} and message_id='{message_id}'\"\r\n id_str = f\"SELECT count(*) FROM auditlog where customerID=? and message_id=?\"\r\n params = (customer_id, message_id)\r\n\r\n try:\r\n # execute insertion into table\r\n self.cursor.execute(id_str, params)\r\n except:\r\n # something failed. Wait a few seconds to try to get a decent close of connection, reconnect and execute again\r\n time.sleep(30)\r\n self.connect()\r\n self.cursor.execute(id_str, params)\r\n\r\n # get count of id\r\n count = self.cursor.fetchone()[0]\r\n print(f\"item count in auditlog: {count}\")\r\n\r\n id_str = f\"SELECT message_id,timestamp_email,text FROM auditlog where customerID=? and message_id=?\"\r\n params = (customer_id, message_id)\r\n self.cursor.execute(id_str, params)\r\n alt_count = self.cursor.fetchall()\r\n print(f\"item alt_count in auditlog: {alt_count}\")\r\n\r\n return count > 0",
"def client_id(self) -> str:",
"def on_text_message(self, update, context):\n chat_id = update.effective_chat.id\n log.info(\"Msg from:%s `%s`\", chat_id, update.effective_message.text)\n\n if context.user_data[\"state\"] == c.State.EXPECTING_AMOUNT:\n log.info(\"Vol:%s spent %s MDL on this request\", chat_id, update.effective_message.text)\n # TODO validate the message and make sure it is a number, discuss whether this is necessary at all\n # TODO send this to the server, we need to define an API for that\n request_id = context.user_data[\"current_request\"]\n\n # Write this amount to the persistent state, so we can rely on it later\n context.bot_data[request_id][\"amount\"] = update.effective_message.text\n\n # Then we have to ask them to send a receipt.\n self.send_message_ex(update.message.chat_id, c.MSG_FEEDBACK_RECEIPT)\n context.user_data[\"state\"] = c.State.EXPECTING_RECEIPT\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_FURTHER_COMMENTS:\n log.info(\"Vol:%s has further comments: %s\", chat_id, update.effective_message.text)\n request_id = context.user_data[\"current_request\"]\n context.bot_data[request_id][\"further_comments\"] = update.effective_message.text\n self.finalize_request(update, context, request_id)\n return\n\n if context.user_data[\"state\"] == c.State.EXPECTING_PROFILE_DETAILS:\n self.build_profile(update, context, raw_text=update.effective_message.text)\n return\n\n # if we got this far it means it is some sort of an arbitrary message that we weren't yet expecting\n log.warning(\"unexpected message ..........\")",
"def query_client(self, client_id):\n try:\n return self.client_model.objects.get(client_id=client_id)\n except self.client_model.DoesNotExist:\n return None",
"def on_get(self, req, resp, **kwargs):\n note_id = kwargs['note_id']\n self.validate_note_id(note_id)\n note = self.get_note_with_access_check(req.context, note_id)\n resp.text = self.get_note_details(note)\n resp.status = falcon.HTTP_200",
"def get_announcement(self, cid):\n cid = cid.upper()\n query = \"SELECT * from announcement where cid = %s\"\n inputs = (cid, )\n result = self.database_manager.execute_query(query, inputs)\n if result:\n announcement = \"Announcement for {} ({}): {}\".format(result[0][0], result[0][3], result[0][2])\n else:\n announcement = \"No announcement for this {}\".format(cid)\n return announcement",
"def manage_read_request(self, client):\n\n # obtain the message\n message = client.recv()\n message = json.loads(message)\n msg = message[\"payload\"].strip()\n if msg.startswith(\"/\"):\n type = \"c2s\"\n elif msg.startswith(\"@\"):\n type = \"c2c\"\n else:\n type = \"c2g\"\n\n func = getattr(self, \"request_\"+type)\n func(client, message)\n # self.msg_map[message['type']](client, message)",
"def info(self, id):",
"def client(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/users/clients/{params['id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n client = result[\"response\"][\"result\"][\"client\"]\n client_obj = FreshbooksClient(\n accounting_systemid=client['accounting_systemid'], \n first_name=client['fname'],\n last_name=client['lname'],\n email=client['email'],\n vat_name=client['vat_name'],\n vat_number=client['vat_number'],\n home_phone=client['home_phone'],\n organization=client['organization'],\n username=client['username']\n )\n return client_obj.__dict__",
"def bulk_detail(self, *appids):\n bulk_message = self.bulkDetails(appids)\n for appid, message in zip(appids, bulk_message.entry):\n try:\n if message.ByteSize() > 0:\n item = {}\n item[\"appid\"] = message.doc.docid\n item[\"version_code\"] = message.doc.details.appDetails.versionCode\n item[\"price\"] = message.doc.offer[0].formattedAmount.lower()\n seen = self.redis.hget(\"app_record\", item['appid'])\n if item['price'] != 'free':\n self.redis.sadd(\"paid_appids\", item['appid'])\n continue\n if str(item[\"version_code\"]) != seen:\n if not seen:\n item['tag'] = 'new'\n else:\n item['tag'] = 'updated'\n else:\n #self.log.warning(\"Ignore app %s vc %s local vc %s\" % (item['appid'], item['version_code'], seen))\n continue\n\n share_url = message.doc.shareUrl\n response = self.requests.get(share_url)\n if response.status_code == 404:\n continue\n\n q = _Q(response.content.decode('utf-8'))\n item[\"offer_type\"] = message.doc.offer[0].offerType\n category_url = q(\".document-subtitle.category\").attr('href')\n\n category = ''\n if category_url:\n category = re.search('.*/(.*?)$', category_url).group(1)\n item[\"category_id\"] = CATEGORY_MAP.get(category, 'TOOLS')\n item[\"category_play\"] = category\n item[\"description\"] = q('div[itemprop=description]').html()\n item[\"lang\"] = unicode(guess_language(q('.id-app-orig-desc').text() or 'en'))\n item[\"developer\"] = q(\"a.document-subtitle.primary span\").text()\n item[\"group\"] = GROUP_MAP.get(message.doc.details.appDetails.appType) or 'app'\n item[\"icon\"] = [img.imageUrl for img in message.doc.image if img.imageType == 4][0]\n item[\"is_deleted\"] = False\n item[\"name\"] = message.doc.title\n name = re.sub(ur\"\"\"\\$|\\%|\\(|\\)|\\[|\\[|\\]|\\*|\\ |\\®|\\#|\\~|\\`|\\@|\\^|\\&|\\{|\\}|\\<|\\>|\\?|\\\"|\\'|\\’|\\–|\\:|\\;|\\||\\/|\\+|\\!|\\•|\\,|\\™|\\_\"\"\", '-', item['name'])\n name_url = urllib.quote(name.encode('utf-8'))\n if \"%\" not in name_url:\n item['name_url'] = name_url\n\n item[\"operating_systems\"] = q(\"div[itemprop=operatingSystems]\").text().strip()\n item[\"order\"] = 0\n item[\"rating\"] = message.doc.aggregateRating.starRating\n item['rating_user'] = humanize.intcomma(message.doc.aggregateRating.ratingsCount)\n\n total_count = message.doc.details.appDetails.numDownloads\n total_count = remove_downloads(total_count)\n item[\"total_count\"] = total_count\n item[\"download_count\"] = strCount_to_intCount(total_count)\n\n item[\"release_time\"] = message.doc.details.appDetails.uploadDate\n item[\"screenshot\"] = [img.get('src') if img.get('src').startswith('http') else 'http:' + img.get('src') for img in q(\"div.thumbnails img[itemprop=screenshot]\")]\n item[\"update_info\"] = q(\".recent-change\").text().strip()\n item[\"version_name\"] = q(\"div[itemprop=softwareVersion]\").text()\n item[\"size\"] = humanize.naturalsize(message.doc.details.appDetails.installationSize, gnu=True)\n item[\"source\"] = 'crawler'\n item[\"channel\"] = 'googleplay'\n item[\"paid\"] = 1 # 1 for free, 2 for paid\n item[\"search_order\"] = 0\n item[\"search_reindex\"] = 1\n item['app_status'] = 0\n\n yield item\n else:\n yield {\"appid\": appid, 'notfound': True}\n except Exception as e:\n traceback.print_exc()",
"def get_client_info(client_id):\n if client_id not in drivers:\n return None\n\n driver_status = drivers[client_id].get_status()\n is_alive = False\n is_logged_in = False\n if (driver_status == WhatsAPIDriverStatus.NotLoggedIn\n or driver_status == WhatsAPIDriverStatus.LoggedIn):\n is_alive = True\n if driver_status == WhatsAPIDriverStatus.LoggedIn:\n is_logged_in = True\n \n return {\n \"is_alive\": is_alive,\n \"is_logged_in\": is_logged_in,\n \"is_timer\": bool(timers[client_id]) and timers[client_id].is_running\n }",
"def viewOne(id):\n print(inspect.stack()[1][3])\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to find the given client'}\n return list_to_json(ResultSet)",
"def get_self_info_client(request: Request) -> ReturnDict:\n client_info_serializer = ClientInfoSerializer(request.user.client)\n customer_account = CustomerAccountSerializer(request.user.customeraccount)\n data = client_info_serializer.data\n data['customer_account'] = customer_account.data\n return data",
"def fetch_info(self, client):\n self.log_verbose(\"Sending info command\")\n client.send(\"info\")\n try:\n data = client.read_response()\n except RedisError as e:\n collectd.error(\"redis_info plugin: Error response from %s:%d - %r\" % (self.host, self.port, e))\n return None\n\n self.log_verbose(\"Received data: %s\" % data)\n\n linesep = \"\\r\\n\" if \"\\r\\n\" in data else \"\\n\"\n info_dict = self.parse_info(data.split(linesep))\n\n return info_dict",
"def on_slave_report(client_id, data):",
"def process_med_info_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\t\traise Exception(\"Not yet implemented\")",
"def on_message(client, userdata, msg):\n if msg.topic == \"adjudication/pass\":\n print(\"'adjudication/pass' message received!\")\n print(\"Money dispensed!\")\n sys.exit()\n\n elif msg.topic == \"adjudication/fail_face\":\n print(\"'adjudication/fail_face' message received!\")\n verification = raw_input(\"Facial recognition failed. Please enter your password to authenticate your identity: \")\n client.publish(\"local/response\", payload=verification, qos=2, retain=False)\n print(\"Test...response sent\")\n\n elif msg.topic == \"adjudication/terminate_face\": # changed from fail info\n print(\"'adjudication/terminate_face' message received!\")\n print(\"Facial recognition failed. We are unable to dispense money.\")\n sys.exit()\n\n elif msg.topic == \"adjudication/terminate_info\": # changed from fail info\n print(\"'adjudication/terminate_info' message received!\")\n print(\"Facial recognition was marginal and identity verification failed. We are unable to dispense money.\")\n sys.exit()\n\n elif msg.topic == \"adjudication/usernotfound\":\n print(\"'adjudication/usernotfound' message received!\")\n print(\"User is not in database. We are unable to dispense money.\")\n sys.exit() \n\n else:\n print(\"Message with unspecificied topic received from local client: \", msg.topic)\n print(\"###### No action taken on message ######\")",
"def action_retrieve_cae(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n #TODO: not correct fix but required a frech values before reading it.\n self.write(cr, uid, ids, {})\n\n conn_obj = self.pool.get('wsafip.connection')\n serv_obj = self.pool.get('wsafip.server')\n currency_obj = self.pool.get('res.currency')\n\n Servers = {}\n Requests = {}\n Inv2id = {}\n for inv in self.browse(cr, uid, ids, context=context):\n journal = inv.journal_id\n conn = journal.afip_connection_id\n\n # Only process if set to connect to afip\n if not conn: continue\n \n # Ignore invoice if connection server is not type WSFEX.\n if conn.server_id.code != 'wsfex': continue\n\n Servers[conn.id] = conn.server_id.id\n\n # Take the last number of the \"number\".\n # Could not work if your number have not 8 digits.\n invoice_number = int(re_number.search(inv.number).group())\n\n _f_date = lambda d: d and d.replace('-','')\n\n # Build request dictionary\n if conn.id not in Requests: Requests[conn.id] = {}\n Requests[conn.id][inv.id]=dict( (k,v) for k,v in {\n 'Id': journal.point_of_sale, ######## EDIT (AVERIGUAR ID)\n 'CbteFch': _f_date(inv.date_invoice),\n 'CbteTipo': journal.journal_class_id.afip_code,\n 'PtoVta': journal.point_of_sale,\n 'CbteNro': journal.point_of_sale, ########## BUSCAR NUMERO (00000103 por ej)\n 'TipoExpo': inv.afip_concept, ############## EDITAR PARA QUE SEA TIPO_EXPO DIFERENTE AL DE LA WSFE\n \n 'Permisos': { 'Permiso': self.get_permiso(cr, uid, inv.id) },\n \n 'DstCmp': inv.partner_id.country_id.afip_code,\n 'Cliente': inv.partner_id.name,\n 'CuitPaisCliente': inv.partner_id.country_id.cuit_juridica if inv.partner_id.is_company == True else inv.partner_id.country_id.cuit_fisica,\n 'DomicilioCliente': inv.partner_id.street+\", \"+inv.partner_id.city,\n 'IdImpositivo': ######### AVERIGUAR COMO COMPLETAR ESTE CAMPO\n \n 'MonId': inv.currency_id.afip_code,\n 'MonCotiz': currency_obj.compute(cr, uid, inv.currency_id.id, inv.company_id.currency_id.id, 1.),\n \n 'ObsComerciales': inv.comment, ####### NECESARIO HACER UN CAMPO NUEVO LLAMADO OBS COMERCIALES?\n 'ImpTotal': inv.amount_total,\n 'Obs': inv.comment,\n \n 'CbtesAsoc': {'CbteAsoc': self.get_related_invoices(cr, uid, inv.id) },\n \n 'FormaPago': inv.payment_term,\n 'Incoterms': inv.payment_term, ####### HACER FUNCIONALIDAD DE INCOTERMS\n 'IncotermsDs': inv.payment_term, ####### HACER FUNCIONALIDAD DE INCOTERMS DESCRIPTION (VACIO WHEN Incoterms == None)\n 'IdiomaCbte': inv.payment_term, ####### HACER FUNCIONALIDAD IDIOMAS (son solo 3, necesario hacer una tabla?)\n \n 'Items': { 'Item': self.get_item(cr, uid, inv.id) }, ####### HACER FUNCIONALIDAD ITEMS\n \n }.iteritems() if v is not None)\n Inv2id[invoice_number] = inv.id\n\n for c_id, req in Requests.iteritems():\n conn = conn_obj.browse(cr, uid, c_id)\n res = serv_obj.wsfex_get_cae(cr, uid, [conn.server_id.id], c_id, req) # TODO:\n for k, v in res.iteritems():\n if 'CAE' in v:\n self.write(cr, uid, Inv2id[k], {\n 'afip_cae': v['CAE'],\n 'afip_cae_due': v['CAEFchVto'],\n })\n else:\n msg = 'Factura %s:\\n' % k + '\\n'.join(\n [ u'(%s) %s\\n' % e for e in v['Errores']] +\n [ u'(%s) %s\\n' % e for e in v['Eventos']]\n )\n raise osv.except_osv(_(u'AFIP Validation Error'), msg)\n\n return super(invoice, self).action_retrieve_cae(cr, uid, ids, context)",
"def getClientAndConfiguration(request, format=None):\n\n #idCompany = request.data['idCompany']\n idCompany = request.data.get(\"idCompany\", None)\n\n isAdmin = request.data.get('isAdmin', False)\n isCompany = request.data.get('isCompany', False)\n ids_clients = []\n\n print(\"view getClientAndConfiguration idCompany isAdmin is Company;\", idCompany, isAdmin, isCompany)\n if isAdmin or isCompany:\n if idCompany == None:\n # Se consultan todos los clientes sin filtro pq esto solo lo puede hacer el usuario admin\n print(\"VIEW getClientAndConfiguration consulto clientes para todas las compañias \")\n ids_clients = Client.objects.values(\"id\")\n else:\n # Consultar todos los ids de clientes asociados a una compañia\n print (\"VIEW getClientAndConfiguration consulto clientes para una compañia con id \", idCompany)\n ids_clients = Client.objects.filter(company__id=idCompany).values(\"id\")\n\n print (\" VIEW getClientAndConfiguration ids_clients\", str(ids_clients))\n # max_survey=Config_surveys_by_clients.objects.filter(client=OuterRef('pk'))),\n # Los ultimos dos campos los agregue para que se puedan mostrar facilmente en una lista desplegable, pues esas listas esperan el id con el campo value y el texto con el campo text\n clients_with_configuration= Config_surveys_by_clients.objects.all().filter(client__id__in=ids_clients).values('client__id', 'client__client_company_name', 'client__identification', 'client__company_id','client__company__name','client__constitution_year', 'client__number_employees',\n 'client__is_corporate_group', 'client__is_family_company',\"max_surveys\",\"used_surveys\").annotate(config_id=F('id'),text=F('client__client_company_name'), value=F('client__id'))\n #clients_without_configuration= Client.objects.all().annotate(max_surveys=Value('0'),used_surveys=Value('0'))\n\n # Se consultan los id de los que si tienen configuracion para excluirlos de la consulta directa de la tabla de clientes y así hacer que la union no tenga repetidos\n clients_with_configuration_ids = Config_surveys_by_clients.objects.all().values('client__id')\n\n # Se hace la resta en los campos que se anotan solo como truco para que los valores sean zero pues no encontre como inicializarlos realmente en cero\n clients_without_configuration= Client.objects.exclude(id__in=clients_with_configuration_ids).filter(id__in=ids_clients).values('id', 'client_company_name', 'identification', 'company_id','company__name','constitution_year', 'number_employees',\n 'is_corporate_group', 'is_family_company').annotate(max_surveys=Count('id')-Count('id'),used_surveys=Count('id')-Count('id'),config_id=Count('id')-Count('id'), text=F('client_company_name'),value=F('id')).order_by('-updated_at')\n\n #FIXME - Tratar de agregar los campos que faltan manualmente antes de retornar los datos\n all_clients= clients_without_configuration.union(clients_with_configuration)\n print(\"Clients and config survey \", all_clients)\n return Response(all_clients)\n # return Response ()",
"async def extract_data_to_notify(app):\n notification_data = await app.db.select(\n f\"\"\"\n SELECT added_timestamp,\n total_area,\n full_url, \n jk_url,\n nearest_underground, \n nearest_underground_dist, \n price_rur, \n rooms_count,\n is_apartments\n FROM public.flats\n WHERE 1=1 \n AND created_at > (now() - interval '1 hours' - interval '1 minute')\n -- AND distance_from_center <= 9\n \"\"\", {}\n )\n return notification_data",
"def get_client_by_id(self, client_id):\r\n cursor = self.conn.cursor()\r\n cursor.execute(\"\"\"SELECT * FROM CLIENT WHERE id={}\"\"\".format(client_id))\r\n return cursor.fetchall()",
"def _set_search_additional_message(self):\n if self._report_data and self._report_data['details']:\n for detail in self._report_data['details']:\n messages = []\n has_exempt_note: bool = False\n if detail.get('location'):\n if detail['location'].get('leaveProvince') and detail['status'] == 'EXEMPT':\n messages.append({'messageType': 'OUT_PROV'})\n if detail.get('description') and detail['description'].get('sections'):\n sections = detail['description'].get('sections')\n for section in sections:\n if section.get('widthFeet', 0) >= 16:\n messages.append({'messageType': 'WIDTH'})\n break\n if detail.get('notes'):\n for note in detail['notes']:\n if detail['status'] == 'CANCELLED' and note.get('documentType', '') == 'REGC':\n messages.append({'messageType': 'REGC'})\n elif note.get('documentType', '') in ('EXRS', 'EXNR') and note.get('createDateTime'):\n has_exempt_note = True\n message = {\n 'messageType': note.get('documentType'),\n 'messageId': note.get('documentRegistrationNumber', ''),\n 'messageDate': Report._to_report_datetime(note['createDateTime'], False)\n }\n messages.append(message)\n if not has_exempt_note and detail.get('status') == 'EXEMPT':\n message = {'messageType': 'EXEMPT'}\n messages.append(message)\n if messages:\n detail['messages'] = messages",
"def test_client_retrieve(self):\n pass",
"def perform_create(self, serializer):\n user = SupportRequest.target_user(self.request)\n client = Client.objects.filter_by_user(user).get(id=int(self.get_parents_query_dict()['client']))\n if 'client' in serializer.validated_data:\n if 'civil_status' in serializer.validated_data['client']:\n client.civil_status = serializer.validated_data['client']['civil_status']\n if 'smoker' in serializer.validated_data['client']:\n client.smoker = serializer.validated_data['client']['smoker']\n if 'drinks' in serializer.validated_data['client']:\n client.drinks = serializer.validated_data['client']['drinks']\n if 'height' in serializer.validated_data['client']:\n client.height = serializer.validated_data['client']['height']\n if 'weight' in serializer.validated_data['client']:\n client.weight = serializer.validated_data['client']['weight']\n if 'daily_exercise' in serializer.validated_data['client']:\n client.daily_exercise = serializer.validated_data['client']['daily_exercise']\n\n if 'home_value' in serializer.validated_data['client']:\n client.home_value = serializer.validated_data['client']['home_value']\n if 'home_growth' in serializer.validated_data['client']:\n client.home_growth = serializer.validated_data['client']['home_growth']\n if 'ss_fra_todays' in serializer.validated_data['client']:\n client.ss_fra_todays = serializer.validated_data['client']['ss_fra_todays']\n if 'ss_fra_retirement' in serializer.validated_data['client']:\n client.ss_fra_retirement = serializer.validated_data['client']['ss_fra_retirement']\n if 'state_tax_after_credits' in serializer.validated_data['client']:\n client.state_tax_after_credits = serializer.validated_data['client']['state_tax_after_credits']\n if 'state_tax_effrate' in serializer.validated_data['client']:\n client.state_tax_effrate = serializer.validated_data['client']['state_tax_effrate']\n if 'pension_name' in serializer.validated_data['client']:\n client.pension_name = serializer.validated_data['client']['pension_name']\n if 'pension_amount' in serializer.validated_data['client']:\n client.pension_amount = serializer.validated_data['client']['pension_amount']\n if 'pension_start_date' in serializer.validated_data['client']:\n client.pension_start_date = serializer.validated_data['client']['pension_start_date']\n if 'employee_contributions_last_year' in serializer.validated_data['client']:\n client.employee_contributions_last_year = serializer.validated_data['client']['employee_contributions_last_year']\n if 'employer_contributions_last_year' in serializer.validated_data['client']:\n client.employer_contributions_last_year = serializer.validated_data['client']['employer_contributions_last_year']\n if 'total_contributions_last_year' in serializer.validated_data['client']:\n client.total_contributions_last_year = serializer.validated_data['client']['total_contributions_last_year']\n client.save()\n return serializer.save(client=client)",
"def _get_client_id(self, context):\n for key, value in context.invocation_metadata():\n if key == 'client_id':\n return value\n raise Exception('client id not found')",
"def get_remote_alert_data(client: Client, remote_alert_id: str) -> tuple[Dict, Dict]:\n alert_details = client.alert_get_details_request(alert_id=remote_alert_id, detailed='true')\n\n updated_object: Dict[str, Any] = {}\n for field in INCIDENT_INCOMING_MIRROR_ARGS:\n mirrored_field_value = demisto.get(alert_details, field, '')\n if '.' in field: # field is nested (currently it is only the policy.name field)\n # policy.name field was added to the mirrored fields cause this is the field that is used in the default classifier to\n # classify incident types. Without it, all mirrored alerts will be changed to the default incident type which is\n # 'Prisma Cloud'.\n split_field = field.split('.')\n updated_object[split_field[0]] = {split_field[1]: mirrored_field_value}\n else:\n updated_object[field] = mirrored_field_value\n return alert_details, updated_object",
"def alert(user, follow, data, client_fb):\n users_notify = database.get_from_notify(username=user, username_follow=follow)\n for user in users_notify:\n if user['thread_type'] == '0':\n if user['image_flag']:\n message = get_message(message_type='image', source=follow, data=data)\n client_fb.sendLocalImage(image_path='tmp.jpg', message=message, thread_id=str(user['thread_id']))\n client_fb.sendMessage(message=MESSAGE_FOOTER, thread_id=str(user['thread_id']))\n logger.info('User %s notified %s on facebook.', user, str(user['thread_id']))\n # clean image created\n os.remove('tmp.jpg')\n else:\n message = get_message(message_type='no_image', source=follow)\n client_fb.sendMessage(message=message, thread_id=str(user['thread_id']))\n logger.info('%s got notified on facebook.', str(user['thread_id']))",
"def on_message(client, userdata, msg):\n# print(\"message received \", str(msg.payload.decode(\"utf-8\")))\n# print(\"message topic=\", msg.topic)\n# print(\"message qos=\", msg.qos)\n# print(\"message retain flag=\", msg.retain)\n# return\n\n device_id, data_type = re.findall(MQTT_REGEX, msg.topic)[0]\n data = json.loads( ( msg.payload.decode('utf-8') ) )\n\n if ( \"diag\" == data_type ):\n _send_diag_data_to_influxdb( device_id, data )\n\n if ( \"data\" == data_type ):\n _send_sizing_data_to_influxdb( device_id, data )",
"def show_callback(self, client_data):\n pass",
"def who(id, client, name):\r\n exists = person.locate(id)\r\n\r\n if not exists:\r\n person.create(id, name)\r\n client.send_message(id, 'Hello ' + name + '''!\r\nI am your Journal Buddy :) My purpose is to help you manage your productivity!\r\n\r\nYou can:\r\n - Set up habits\r\n - Schedule events\r\n - Create a task pool\r\n - Set reminders\r\n - Add notes - tell me things you don’t want to forget! May it be a quote, or a movie you liked\r\n\r\nEvery month we can choose which tasks you would like to complete in the month ahead!\r\nEvery week I will help you set goals in terms of tasks and habits for the week ahead!\r\n\r\nThe more we get to know each other the more I will be able to help! Providing weekly analytics and suggestions for future performance. Tell me how you feel throughout your tasks, habits and events and I will share with you what I think will work best.\r\n\r\nType /setup to begin!''')\r\n\r\n users[id] = {\"last\": \"new user\"}\r\n elif not (users.keys().__contains__(id)):\r\n users[id] = {\"last\": \"welcome back\"}",
"def send_alert_to(self, event):\n client_data = event['client_data']\n if self.scope[\"user\"].username != event['sent_from'] \\\n and self.scope[\"user\"].username == client_data['sent_to']:\n self.send(text_data=json.dumps(client_data))",
"def get_message_by_person_id(self,person_id):\n if type(person_id) == str:\n # logging.info(\"object id\")\n person_id = ObjectId(person_id)\n # logging.info(\"person id %s\"%person_id)\n try:\n\t result = self.mongodb.message.info.find({\"person_id\":person_id}).sort([('date',-1)]).limit(1)[0]\n\texcept Exception as e:\n\t raise DBQueryError('exception when get message info , error message is %s'%str(e))\n if result == []or result == None:\n raise DBQueryError('error when get message detail infomation by person_id') \n\t\n logging.info(\"success %s\"%result)\n\n return result",
"def process_refill_response(self, sender, message, response):\n\t\tnow = datetime.datetime.now()\n\t\tmessage.datetime_responded = now\n\t\tmessage.save()\n\n\t\t# Switch on type of response\n\t\tif self.is_yes(response):\n\t\t\t# TODO(mgaba): Implement questions about weekly, monthly prescriptions. What's the right day?\n\t\t\t# Send out a medication ack message\n\t\t\t# Update state\n\t\t\tfeedbacks = message.feedbacks.all()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.completed = True\n\t\t\t\tfeedback.datetime_responded = now\n\t\t\t\tfeedback.save()\n\n\t\t\tnotifications = message.notifications.all()\n\t\t\tfor notification in notifications:\n\t\t\t\tnotification.active = False\n\t\t\t\tnotification.save()\n\n\t\t\t# Calculate the time of the next earliest notification to put in the message that gets sent back\n\t\t\tearliest_notification = None\n\t\t\tnow = datetime.datetime.now()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.prescription.filled = True\n\t\t\t\tfeedback.prescription.save()\n\t\t\t\tmed_notifications = Notification.objects.filter(prescription=feedback.prescription, _type=Notification.MEDICATION)\n\t\t\t\tfor med_notification in med_notifications:\n\t\t\t\t\tif med_notification.send_datetime < now:\n\t\t\t\t\t\tmed_notification.update_to_next_send_time()\n\t\t\t\t\tif earliest_notification == None or earliest_notification.send_datetime > med_notification.send_datetime:\n\t\t\t\t\t\tearliest_notification = med_notification\n\n\t\t\t# Convert the time of the next earliest notification to a string for the template\n\t\t\thour = earliest_notification.send_datetime.hour\n\t\t\tminute = earliest_notification.send_datetime.minute\n\t\t\tif hour == 0:\n\t\t\t\thour = 12\n\t\t\t\tampm = 'am'\n\t\t\telif hour == 12:\n\t\t\t\thour = 12\n\t\t\t\tampm = 'pm'\n\t\t\telif hour > 12:\n\t\t\t\thour = hour - 12\n\t\t\t\tampm = 'pm'\n\t\t\telse:\n\t\t\t\tampm = 'am'\n\t\t\tif earliest_notification.send_datetime.date() == now.date():\n\t\t\t\tday = \"today\"\n\t\t\telif earliest_notification.send_datetime.date() == now.date() + datetime.timedelta(days=1):\n\t\t\t\tday = \"tomorrow\"\n\t\t\telif earliest_notification.send_datetime.date() < now.date() + datetime.timedelta(days=7):\n\t\t\t\tweekdays = {'0':'Monday',\n\t\t\t\t '1':'Tuesday',\n\t\t\t\t '2':'Wednesday',\n\t\t\t\t '3':'Thursday',\n\t\t\t\t '4':'Friday',\n\t\t\t\t '5':'Saturday',\n\t\t\t\t '6':'Sunday'}\n\t\t\t\tday = \"on \" + weekdays[str(earliest_notification.send_datetime.weekday())]\n\n\t\t\t# Create new message\n\t\t\tcontext = {'hour':hour,\n\t\t\t 'minute':minute,\n\t\t\t 'ampm':ampm,\n\t\t\t 'day':day}\n\t\t\ttemplate = 'messages/refill_ack_message.txt'\n\t\t\tcontent = render_to_string(template, context)\n\t\t\tMessage.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, previous_message=message, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\telif self.is_no(response):\n\t\t\t# Send out a medication questionnaire message\n\t\t\t# Update state\n\t\t\tfeedbacks = message.feedbacks.all()\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tfeedback.completed = False\n\t\t\t\tfeedback.datetime_responded = now\n\t\t\t\tfeedback.save()\n\n\t\t\t# Create a questionnaire message\n\t\t\ttemplate = 'messages/refill_questionnaire_message.txt'\n\t\t\tcontext = {'response_dict': iter(sorted(Message.REFILL_QUESTIONNAIRE_RESPONSE_DICTIONARY.items()))}\n\t\t\tcontent = render_to_string(template, context)\n\n\t\t\t# Create new message\n\t\t\tnew_m = 
Message.objects.create(to=sender, _type=Message.REFILL_QUESTIONNAIRE, previous_message=message,\n\t\t\t content=content)\n\t\t\tfor feedback in feedbacks:\n\t\t\t\tnew_m.feedbacks.add(feedback)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\n\t\telif self.is_med_info(response):\n\t\t\t# Send out a med info message\n\t\t\t# TODO:Implement med info for real\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\tcontent = \"Medication information is a work in progress.\\n\\n\"+\\\n\t\t\t\t\t \"Did you pick up your meds?\\n\"+\\\n\t\t\t\t\t \"y - yes\\n\"+\\\n\t\t\t\t\t \"n - no\"\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\t\t\tpass\n\t\t# Unknown response\n\t\telse:\n\t\t\tmessage.datetime_responded = None\n\t\t\tmessage.save()\n\t\t\ttemplate = 'messages/unknown_response.txt'\n\t\t\tcontent = render_to_string(template)\n\t\t\tnew_m = Message.objects.create(to=sender, _type=Message.STATIC_ONE_OFF, content=content)\n\t\t\treturn HttpResponse(content=content, content_type='text/plain')\n\t\traise Exception(\"Not yet implemented\")",
"def update_specialist_client():\n # SOLO USO PARA AMBIENTE EN DESARROLLO\n # ESTA FUNCION NO ESTA OPTIMIZADA O SEGURA\n for specialist in Specialist.objects.all():\n\n for query in Query.objects.filter(specialist=specialist):\n # Generar nodos de listado para nuevo especialista\n generateDataMessageClients(query.client.id, query.category.id, query.id,\n query.status, specialist.id)",
"def from_reg_msg(cls, reg_msg, client_id):\r\n\r\n description = reg_msg.description\r\n client_endpoint = reg_msg.client_endpoint\r\n \"\"\"\r\n If client is found in database return from database\r\n else save to database and return client instance\r\n \"\"\"\r\n if len(cls.query(client_endpoint=client_endpoint)) > 0:\r\n return cls.query(client_endpoint=client_endpoint)[0]\r\n else:\r\n rclient = cls(client_id, client_endpoint=client_endpoint, desc=description, is_reg=False)\r\n rclient.commit()\r\n return rclient",
"def client_details(self):\n return self._client_details",
"def test_ctcpQuery_CLIENTINFO(self):\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"\")\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"PING PONG\")\n info = (\n \"ACTION CLIENTINFO DCC ERRMSG FINGER PING SOURCE TIME \" \"USERINFO VERSION\"\n )\n self.assertEqual(\n self.client.methods,\n [\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", info)])),\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", None)])),\n ],\n )",
"def __idigi_receive_cb(self, path, message, response_required, timestamp):\r\n # Forward the message off to each iDigi Iridium client that we manage.\r\n iridium_manager_tracer.warning(\"Receive Data. Response_required: %d Message: %s\", \\\r\n response_required, message)\r\n for client in self.__client_list:\r\n if isinstance(client, iDigiIridiumTransportClient):\r\n response = client.receive_message(message, response_required)\r\n # If a response is required, and the client gave a us response,\r\n # we have to return the response to the callback function.\r\n # This means that only 1 client can respond to this message.\r\n # ie, first come, first served.\r\n if response_required and response != None:\r\n iridium_manager_tracer.info(\"Response: %s\", response)\r\n full_response = \"\"\r\n for res in response:\r\n full_response += res[0]\r\n return full_response\r\n\r\n iridium_manager_tracer.warning(\"No Response\")\r\n return None",
"def info(self, correlation_id: Optional[str], message: str, *args: Any, **kwargs: Any):\n pass",
"def request(self, update, context):\n\n user = self.User(update)\n user = self.__find_matching_user(user)\n message = update.message.text.lower().split(\" \")\n if len(message) != 3:\n output = \"looks like you have a little mistake in the command\\n\" \\\n \"try /bus {bus number} {station number}\" \\\n \"for example /bus 14 3\"\n else:\n try:\n line = int(message[1])\n station = int(message[2])\n if len(user.stations) >= 3 and not self.data_base.check_admin(user):\n output = \"Sorry you cannot have more than 3 requests at a time.\"\n elif line in map(lambda x: x.line_number, user.stations) and not self.data_base.check_admin(user):\n station_to_cancel = \"Error\"\n for station in user.stations:\n if station.line_number == line:\n station_to_cancel = station.station_number\n output = \"looks like you already have a request for that line so you cannot place another one\\n\" \\\n f\"if that was a mistake you can cancel your request with /cancel {line} {station_to_cancel}\"\n elif line <= 0 or line > 999:\n output = f\"line {line}, doesn't exist. try a line withing the range of 1-999\"\n elif station <= 0 or station > BusController.MAX_STATION:\n output = f\"station {station}, doesn't exist. try a station withing the range of 1-{BusController.MAX_STATION}\"\n elif self.bus_controller.check_line(line):\n self.bus_controller.add_person_to_the_station(line, station)\n output = f\"request accepted, the bus is notified\"\n self.__message_sender.send_line(line, update_passengers=True)\n self.__add_to_users_dict(update)\n else:\n self.bus_controller.add_person_to_the_station(line, station)\n output = f\"request accepted, but there are no buses available for that line yet\"\n self.__add_to_users_dict(update)\n except Exception as e:\n print(e)\n output = \"both of the values you give must be number in order to work\" \\\n \"for example, /request 14 3\"\n\n self.data_base.log(user, update.message.text, output)\n update.message.reply_text(output)",
"def Get_Message_Info(service, userId, message_id):\n message_info = service.users().messages().get(userId=userId, id=message_id).execute()\n\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name']=='Message-ID':\n message_id=header['value']\n if header['name']=='From':\n sender=header['value']\n if header['name']=='Subject':\n subject=header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n\n info = (sender, subject, thread_id, message_id, attachment_list, ID)\n return info",
"def handle(self, message):\n if not message['successful']:\n raise BayeuxError(\n 'Unsuccessful handshake response: {}'\n .format(message.get('error')))\n else:\n logger.info('Hand shook client ID %s', message['clientId'])\n self.client.client_id = message['clientId']",
"def test_get_clients_succeeds_with_valid_client_id_in_params(\n valid_client_model, client, request_headers\n):\n res = client.get(clients_url(1), headers=request_headers)\n response = res.get_json()\n\n assert response['success']\n assert response['message'] == 'client fetched successfully'\n assert response['data']['username'] == 'Leroy Jenkins'\n assert res.status_code == 200",
"def MultiReadClientMetadata(self, client_ids):\n res = {}\n for client_id in client_ids:\n self._ValidateClientId(client_id)\n md = self.metadatas.get(client_id, {})\n res[client_id] = objects.ClientMetadata(\n certificate=md.get(\"certificate\"),\n fleetspeak_enabled=md.get(\"fleetspeak_enabled\"),\n first_seen=md.get(\"first_seen\"),\n ping=md.get(\"ping\"),\n clock=md.get(\"clock\"),\n ip=md.get(\"ip\"),\n last_foreman_time=md.get(\"last_foreman_time\"),\n last_crash_timestamp=md.get(\"last_crash_timestamp\"),\n startup_info_timestamp=md.get(\"startup_info_timestamp\"))\n\n return res",
"def test_client_verification_retrieve(self):\n pass",
"def get_client_data(client_name):\n log.debug('starting get_client_data')\n clients = wf.cached_data('clients', None, max_age=0)\n\n # Loop through clients and return client with a match\n for client in clients:\n if client['name'] == client_name:\n log.debug('get_client_id finished, client_data: ' + str(client))\n return client",
"def test_client_detail(self):\n response = self.client.get(reverse('targetadmin:client-detail',\n args=[self.test_client.pk]))\n self.assertStatusCode(response, 200)\n\n client = response.context['client']\n self.assertEqual(client.name, 'mockclient')\n\n campaigns = json.loads(response.context['campaigns'])\n self.assertTrue(campaigns)\n self.assertEqual(campaigns[0], {\n 'pk': 1,\n 'name': 'Gun Control',\n 'create_dt': '2013-07-15T16:38:28Z',\n 'campaign_properties': {'status': 'draft'},\n })",
"def func_data_ok(self):\n message = self.conf_th_ic.get_item(q_key='std-messages').get('data ok') + ' {}'.format(self.message_id)\n self.func_sender(message)",
"def request_client_feedback(self, shift_id):\n try:\n event_id = self.db_handler.get_shift_extended_info_by_id(shift_id)[1]\n client_id = self.db_handler.get_client_id_by_event_id(event_id)[0]\n\n self.sock_handler.send_socket_command(f'request_feedback-{client_id}-{event_id}')\n\n self.logger.write_to_log(f'feedback was requested', 'model')\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"async def info(message, *args, **kwargs):\r\n instance = kwargs.get(\"i\")\r\n key = kwargs.get(\"key\")\r\n name = \" \".join(args).lower()\r\n for tablename, data in DataTables.items():\r\n if name in namemaps[tablename]:\r\n IDlist = namemaps[tablename][name]\r\n if instance:\r\n IDlist = [IDlist[int(instance)]]\r\n for ID in IDlist:\r\n if key:\r\n output = str(data[ID][key])\r\n else:\r\n output = dictstr(data[ID], kwargs.get(\"json\"))\r\n await reply(message, \"```\\n\" + output + \"\\n```\")\r\n return\r\n else:\r\n await reply(message, f\"\\\"{name}\\\" not found\")",
"def client_details(self):\n return ClientDetail(self._dict.get('client_details'))",
"def handle_client(client):\n\n msg = client.recv(BUFSIZ).decode(\"utf8\")\n msg_split = msg.split(\"@\")\n\n name = msg_split[1]\n vote = msg_split[2]\n\n #Enviar uma mensagem de agradecimento para o cliente\n thankYouMessage = \"Voto computado. Obrigado, \" + name + \"!\"\n client.send(bytes(thankYouMessage, \"utf8\"))\n\n clients[client] = name\n votes.append(vote)\n\n #Computar votos\n counts = np.bincount(votes)\n\n winnerCandidate = np.argmax(counts)\n winnerCandidateVotes = counts[winnerCandidate]\n\n msg = \"O candidato da legenda \" + str(winnerCandidate) + \" vence por \" + str(winnerCandidateVotes) + \" voto(s).\"\n\n broadcast(msg)",
"def __whatsmyid(self, update, context):\n user = self.User(update)\n output = f\"your ID is: {user.id}\"\n user.send_message(output)\n self.data_base.log(user, update.message.text, \"*\" * len(str(user.id)))",
"def _handler(self, client_sock, client_addr):\n try:\n msg = client_sock.recv(1024)\n data = jsonutils.loads(msg)\n client_sock.close()\n\n network_id = self._validate_field(data['network_id'],\n attributes.UUID_PATTERN)\n ip_address = str(netaddr.IPAddress(data['ip_address']))\n lease_remaining = int(data['lease_remaining'])\n self.callback(network_id, ip_address, lease_remaining)\n except ValueError, e:\n LOG.warn(_('Unable to parse lease relay msg to dict.'))\n LOG.warn(_('Exception value: %s') % e)\n LOG.warn(_('Message representation: %s') % repr(msg))\n except Exception, e:\n LOG.exception(_('Unable update lease. Exception'))",
"def info(client):\n\n return client.get_info()",
"def parse_client(self) -> None:\n if self.client:\n return\n\n app_idx = ApplicationIDExtractor(self.user_agent)\n app_id = app_idx.extract().get('app_id', '')\n\n for Parser in self.CLIENT_PARSERS:\n parser = Parser(\n self.user_agent,\n self.ua_hash,\n self.ua_spaceless,\n self.VERSION_TRUNCATION,\n ).parse()\n\n if parser.ua_data:\n self.client = parser\n self.all_details['client'] = parser.ua_data\n self.all_details['client']['app_id'] = app_id\n if app_id:\n if app_id in self.all_details['client']['name']:\n self.all_details['client']['name'] = app_idx.pretty_name()\n elif app_idx.override_name_with_app_id(client_name=parser.name()):\n self.supplement_secondary_client_data(app_idx)\n return\n\n # if no client matched, still add name / app_id values\n if app_id:\n self.all_details['client'] = {\n 'name': app_idx.pretty_name(),\n 'app_id': app_id,\n }",
"def get_client_info(self, client_id):\n\n url = self.api_base_url + \"/\" + str(client_id)\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n\n return jsonified_response",
"def note(self, irc, msg, args, user, id):\n try:\n note = self.db.get(id)\n except dbi.NoRecordError:\n irc.errorInvalid('note id')\n if user.id != note.frm and user.id != note.to:\n s = 'You may only retrieve notes you\\'ve sent or received.'\n irc.error(s)\n return\n newnote = self._formatNote(note, user.id)\n irc.reply(newnote, private=(not note.public))\n self.db.setRead(id)",
"def test_client_nationlity_retrieve(self):\n pass",
"def _assemble_and_send_request(self):\r\n client = self.client\r\n \r\n \r\n # We get an exception like this when specifying an IntegratorId:\r\n # suds.TypeNotFound: Type not found: 'IntegratorId'\r\n # Setting it to None does not seem to appease it.\r\n \r\n del self.ClientDetail.IntegratorId\r\n \r\n # Fire off the query.\r\n response = client.service.postalCodeInquiry(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n PostalCode = self.PostalCode,\r\n CountryCode = self.CountryCode)\r\n\r\n return response",
"def send_dailies(self, line, irc_handler, db_handler):\n\n #Get the irc user.\n user = irc_handler.get_irc_user_name(line[0])\n\n today = datetime.datetime.now()\n\n #Get the format into dd/MM/YYYY\n todays_date = today.strftime(\"%d/%m/%Y\")\n\n #Set empty list to receive reminder messages\n message_list = []\n\n try:\n \n #Get all messages from database.\n message_list = db_handler.get_daily_reminders(user, todays_date)\n \n except Exception as e:\n print(\"Error getting data from table.\")\n print(e)\n\n if(len(message_list) == 0):\n irc_handler.private_message_user(user, \"You have no appointments for today Sir.\")\n else:\n irc_handler.private_message_user(user, \"Your appointments for today Sir:\")\n\n for message in message_list:\n irc_handler.private_message_user(user, message)",
"def __disco_info(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n info=self.disco_get_info(node,iq)\n if isinstance(info,DiscoInfo):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-info query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),info.xmlnode.serialize()))\n resp.set_content(info.xmlnode.copyNode(1))\n elif isinstance(info,Stanza):\n resp=info\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-info response: %s\" % (resp.serialize(),))\n self.stream.send(resp)",
"def on_data(self, data):\r\n if 'in_reply_to_status_id' in data:\r\n self.keep_or_update_tgid()\r\n self.insert_data(data)",
"def post(self):\n \n first_intl = self.request.get(\"first_name\")[:3]\n last_name = self.request.get(\"last_name\")\n \n first_i_intl = self.request.get('int_first_name')[:3]\n last_i_name = self.request.get('int_last_name')\n \n if first_i_intl == \"\":\n interviewer_id = self.request.get('interviewer')\n else:\n interviewer_id = first_i_intl + last_i_name\n interviewer = Interviewer()\n interviewer.i_short = interviewer_id\n interviewer.first_name = self.request.get('int_first_name')\n interviewer.last_name = self.request.get('int_last_name')\n interviewer.key = ndb.Key(Interviewer, interviewer_id)\n interviewer.put()\n \n \n if first_intl == \"\":\n candidate_id = self.request.get('candidate')\n else:\n candidate_id = first_intl + last_name\n candidate = Candidate()\n candidate.c_short = candidate_id\n candidate.first_name = self.request.get(\"first_name\")\n candidate.last_name = self.request.get(\"last_name\")\n \n dNow = datetime.now()\n \n sNM = dNow.month\n sNY = dNow.year\n \n sNum = (sNY - BASE_SESSION_YEAR)*3 + mapYear(sNM) + 1\n \n \"\"\"candidate.session_num = sNum\"\"\"\n\n candidate.key = ndb.Key(Candidate, candidate_id)\n candidate.put()\n \n \n feedback = Feedback(parent=candidate_key(candidate_id))\n \n feedback.interviewer = interviewer_id\n feedback.company = self.request.get('company')\n \"\"\"Ratings\"\"\"\n feedback.personality_scr = self.request.get('personality_scr')\n feedback.personality_descrip = self.request.get('personality_descrip')\n feedback.appearance_scr = self.request.get('appearance_scr')\n feedback.appearance_descrip = self.request.get('appearance_descrip')\n feedback.comm_scr = self.request.get('comm_scr')\n feedback.comm_descrip = self.request.get('comm_descrip')\n feedback.tech_scr = self.request.get('tech_scr')\n feedback.tech_descrip = self.request.get('tech_descrip')\n feedback.body_scr = self.request.get('body_scr')\n feedback.body_descrip = self.request.get('body_descrip')\n \"\"\"Additional Feedback responses\"\"\"\n feedback.pres_scr = self.request.get('pres_scr')\n feedback.pres_descrip = self.request.get('pres_descrip')\n feedback.exp_scr = self.request.get('exp_scr')\n feedback.rec_descrip = self.request.get('rec_descrip')\n feedback.thanks_scr = self.request.get('thanks_scr')\n \n dNow = datetime.now() \n sNM = dNow.month\n sNY = dNow.year\n sNum = (sNY - BASE_SESSION_YEAR)*3 + mapYear(sNM) + 1\n feedback.cycle = sNum\n \n feedback.put()\n \n self.redirect('/thanks')",
"def received_information(update: Update, context: CallbackContext) -> int:\r\n user_data = context.user_data\r\n text = update.message.text\r\n category = user_data['choice']\r\n user_data[category] = text\r\n del user_data['choice']\r\n\r\n update.message.reply_text(\r\n \"Genial, tu pedido está avanzando de esta manera:\"\r\n f\"{facts_to_str(user_data)}Puedes agregar algún comentario o cambio en tu orden en Comentarios...\",\r\n reply_markup=markup,\r\n )\r\n\r\n return CHOOSING",
"def wrap_information_object(self, type_id, vsq, message):\n result = b''\n i = 0\n if not type(type_id) is int:\n return \"ERROR: The type identification has to be an integer.\"\n if not type(vsq) is int:\n return \"ERROR: The variable structure qualifier has to be an integer.\"\n if (not type(message) is list) or (len(message) > 128):\n return \"ERROR: The message has to be a list containing less than 128 objects/elements.\"\n # Number of objects/elements expected based on the VSQ value\n length = (vsq & 0x7F)\n # No information object needed\n if length == 0:\n return result\n if length > len(message):\n return \"ERROR: Variable structure qualifier expects more messages than given.\"\n if length < len(message):\n return \"ERROR: Variable structure qualifier expects fewer messages than given.\"\n # SQ == 1\n if (vsq & 0x80) == 0x80:\n temp = self.wrap_information_object_address()\n if type(temp) is str:\n return temp\n result += temp\n if type_id == M_BO_NA_1:\n while i < length:\n temp = self.wrap_information_object_m_bo_na_1(message[i])\n if type(temp) is str:\n return temp\n result += temp\n i += 1\n elif type_id == M_ME_NC_1:\n while i < length:\n temp = self.wrap_information_object_m_me_nc_1(message[i])\n if type(temp) is str:\n return temp\n result += temp\n i += 1\n else: \n return \"ERROR: The ASDU type was not recognized or is not fit to be a sequence of elements.\"\n # SQ == 0\n else:\n if type_id == M_BO_NA_1:\n while i < length:\n temp = self.wrap_information_object_address()\n if type(temp) is str:\n return temp\n result += temp\n temp = self.wrap_information_object_m_bo_na_1(message[i])\n if type(temp) is str:\n return temp\n result += temp\n i += 1\n elif type_id == M_ME_NC_1:\n while i < length:\n temp = self.wrap_information_object_address()\n if type(temp) is str:\n return temp\n result += temp\n temp = self.wrap_information_object_m_me_nc_1(message[i])\n if type(temp) is str:\n return temp\n result += temp\n i += 1\n elif type_id == C_SC_NA_1:\n if length != 1:\n return \"ERROR: C_SC_NA_1 length has to be 1.\"\n temp = self.wrap_information_object_address()\n if type(temp) is str:\n return temp\n result += temp\n temp = self.wrap_information_object_c_sc_na_1(message[0])\n if type(temp) is str:\n return temp\n result += temp\n elif type_id == C_IC_NA_1:\n if length != 1:\n return \"ERROR: C_IC_NA_1 length has to be 1.\"\n temp = self.wrap_information_object_address()\n if type(temp) is str:\n return temp\n result += temp\n temp = self.wrap_information_object_c_ic_na_1(message[0])\n if type(temp) is str:\n return temp\n result += temp\n elif type_id == C_RD_NA_1:\n if length != 1:\n return \"ERROR: C_RD_NA_1 length has to be 1.\"\n temp = self.wrap_information_object_address()\n if type(temp) is str:\n return temp\n result += temp\n else:\n return \"ERROR: The ASDU type was not recognized or has to be a sequence of elements.\"\n return result",
"def create_message(self,message_information):\n\n #create the message\n if message_information['service'] == 'whatsapp':\n #The details of the message sent is saved here\n message = self.client.messages.create(\n body='Your order of {food} from the {store_name} store is ready. Additional Information: {addit_info}'.format(**message_information),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to='whatsapp:{mobile}'.format(**message_information)\n )\n #Set the ready state to waiting for collection\n #self.firebase.db.child('orders').child('{store_name}'.format(**message_information)).child('{order_id}'.format(**message_information)).update({\"ready\":\"waiting for collection\"})\n self.firebase.update([\"orders\", STORE_NAME, message_information[\"order_id\"]], {\"ready\": \"waiting for collection\"})",
"def cve_announcement(self, cve_code):\n response = self._request(\"GET\", [ROUTE_CVE_ANNOUNCEMENTS, cve_code])\n if response.status_code != 200:\n logging.error(\"Error server id::{}\".format(response.text))\n return None\n\n return CBWParser().parse_response(CBWCve, response)",
"def inform(self, information):\n self.know = merge(self.know, information)",
"def test_context_data_info_message_for_multiple_result(self):\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum')\n factories.SourceDatasetFactory.create(i_dbgap_description='lorem ipsum 2')\n response = self.client.get(self.get_url(), {'description': 'lorem'})\n messages = list(response.wsgi_request._messages)\n self.assertEqual(len(messages), 1)\n self.assertEqual(str(messages[0]), '2 results found.')",
"def handle_ACS_worklist_info_response(self,message,conn):\n response=ResponseClientHandle.switch_msg_stream_type_str2dict(message)\n \n msg_type=response.get(event.KEY_MESSAGE)\n msg_group = int(msg_type) & 0xFF00\n \n if (msg_group == event.EVENT_WORKLIST_GROUP):\n \n # check worklist reseve response\n if(msg_type == event.EV_WORKLIST_RESERVE_RSP):\n log.debug_info(\"ACS server's response worklist reserve suc\")\n\n # call worklist execute start request \n DUTqueue.ResponseWLexecHandle.handle_WLexec_start_request(self.msg,response,None)\n \n elif(msg_type == event.EV_WORKLIST_RESERVE_FAIL):\n log.debug_info(\"ACS server's response worklist reserve fail\")\n\n ResponseClientHandle.handle_send_response(response,conn)\n \n # check worklist start response \n elif(msg_type == event.EV_WORKLIST_EXEC_START_RSP):\n log.debug_info(\"ACS server's response worklist execute start suc\")\n \n # call worklist execute request\n DUTqueue.ResponseWLexecHandle.handle_WLexec_request(self.dut_obj_handle,self.msg,response,conn)\n\n elif(msg_type == event.EV_WORKLIST_EXEC_START_FAIL):\n log.debug_info(\"ACS server's response worklist execute start fail\")\n \n ResponseClientHandle.handle_send_response(response,conn)\n\n # check worklist finish response \n elif(msg_type == event.EV_WORKLIST_EXEC_FINISH_RSP):\n log.debug_info(\"ACS server's response worklist execute finish suc\")\n\n elif(msg_type == event.EV_WORKLIST_EXEC_FINISH_FAIL):\n log.debug_info(\"ACS server's response worklist execute finish fail\")\n \n # check worklist build/bind/download response\n else:\n ResponseClientHandle.handle_send_response(response,conn)\n \n else:\n err_info = \"Unsupport msg event group:%d\" % msg_group\n log.debug_info(err_info)\n ResponseClientHandle.handle_except(self.msg,self.conn,err_info)",
"def addOne():\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n insert(Followup).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Add the given client'}\n return {'status': \"Adding Succesful\"}",
"def test_adviser_notified(self, mocked_notify_client):\n order = OrderFactory()\n adviser = AdviserFactory()\n creator = AdviserFactory()\n\n notify.adviser_added(\n order=order,\n adviser=adviser,\n by=creator,\n creation_date=dateutil_parse('2017-05-18'),\n )\n\n assert mocked_notify_client.send_email_notification.called\n call_args = mocked_notify_client.send_email_notification.call_args_list[0][1]\n assert call_args['email_address'] == adviser.contact_email\n assert call_args['template_id'] == Template.you_have_been_added_for_adviser.value\n\n assert call_args['personalisation']['recipient name'] == adviser.name\n assert call_args['personalisation']['creator'] == creator.name\n assert call_args['personalisation']['creation date'] == '18/05/2017'",
"def top021_incident_report(self):\n\n # Copy incoming message\n outgoing_message = self.incoming_message\n\n # Change topic name in header\n outgoing_message['header']['topicName'] = \"TOP101_INCIDENT_REPORT\"\n\n # Handle PSAP incident ID\n incident_id = outgoing_message['body']['incidentID']\n\n # If an alert, calculate the PSAP incident ID\n if outgoing_message['header']['actionType'] == \"Alert\":\n lat = outgoing_message['body']['position']['latitude']\n long = outgoing_message['body']['position']['longitude']\n\n # Check if this incident is nearby previous incidents\n psap_incident_id = self.calculate_psap_incident_id(incident_id, lat, long)\n\n # Insert the PSAP incident ID to the KB\n self.webgenesis_client.set_incident_report_psap_id(incident_id, psap_incident_id)\n # Set the PSAP incident ID to the TOP101 message\n outgoing_message['body']['incidentID'] = psap_incident_id\n\n # If an update, get the existing PSAP incident ID\n else:\n psap_incident_id = self.webgenesis_client.get_incident_report_psap_id(incident_id)\n outgoing_message['body']['incidentID'] = psap_incident_id\n\n # TODO: TEMP - If attachement type is empty, do nothing\n try:\n if outgoing_message['body']['attachments'][0][\"attachmentType\"] == \"\":\n return\n except:\n pass\n\n # Get the location (lat,long) of the psap incident\n psap_indicent_location = self.webgenesis_client.get_location_of_incident_report(\n outgoing_message['body']['incidentID'])\n if psap_indicent_location is not None:\n outgoing_message['body']['position']['latitude'] = psap_indicent_location[\"lat\"]\n outgoing_message['body']['position']['longitude'] = psap_indicent_location[\"long\"]\n\n # Update incident category\n outgoing_message['body']['incidentCategory'] = self.webgenesis_client.get_incident_category(\n outgoing_message['body']['incidentID'])\n\n # TODO: TEMP\n # Update incident originator\n outgoing_message['body']['incidentOriginator'] = \"KB\"\n\n # Update severity\n outgoing_message['body']['severity'] = self.webgenesis_client.get_incident_cluster_severity(\n outgoing_message['body']['incidentID'])\n\n if Reasoner._proceed_with_TOP101(outgoing_message):\n # Produce outgoing message\n persistent_fields.apply_persistent(outgoing_message)\n self.produce_message(outgoing_message['header']['topicName'], outgoing_message)\n print(\">> TOP101 Incident report sent to PSAP\")\n else:\n print(\"TOP101 is of type image or video, no TOP101 will be sent\")",
"def getmessage(self, update, context):\r\n\r\n redirect_uri = \"https://thawing-ridge-47246.herokuapp.com\"\r\n\r\n # настройка соединения\r\n flow = Flow.from_client_secrets_file(\r\n 'credentials.json',\r\n scopes=SCOPES,\r\n redirect_uri=redirect_uri)\r\n\r\n code = self.get_code()\r\n\r\n flow.fetch_token(code=code, code_verifier=\"111\") # устанавливаем соединение с гуглом\r\n\r\n session = flow.authorized_session() # создаем сессию\r\n response = session.get('https://www.googleapis.com/gmail/v1/users/me/messages').json() # формируем запрос и получаем ответ сервера\r\n\r\n messages = response[\"messages\"]\r\n\r\n # у каждого из сообщений достаем id\r\n for message in messages[0:10]:\r\n mid = message['id']\r\n\r\n # получаем сообщение по id\r\n message_message = session.get(f'https://www.googleapis.com/gmail/v1/users/me/messages/{mid}').json()\r\n\r\n # информация об отправителе, получателе и теме сообщения хранится в ключе 'payload' --> 'headers'\r\n headers = message_message['payload']['headers']\r\n\r\n from_who = None\r\n to_whom = None\r\n subject = None\r\n\r\n for item in headers:\r\n if item['name'] == 'From':\r\n from_who = item['value']\r\n elif item['name'] == 'To':\r\n to_whom = item['value']\r\n elif item['name'] == 'Subject':\r\n subject = item['value']\r\n\r\n # ищем текст сообщения\r\n # достаем из сообщения его части\r\n message_payload_parts = message_message['payload']['parts']\r\n zero_part = message_payload_parts[0]\r\n\r\n if zero_part['mimeType'] == 'text/plain':\r\n self.message_without_attachments(context, message_payload_parts, from_who, to_whom, subject)\r\n elif zero_part['mimeType'] == 'multipart/alternative':\r\n self.message_with_attachments(session, mid, context, zero_part, message_payload_parts, from_who,\r\n to_whom, subject)\r\n\r\n context.bot.send_message(chat_id=update.message.chat_id, text=f'Done.')",
"def test_get_invoice_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_invoice_info(invoice_id=1)\n\n assert actual_data == expected_data"
] |
[
"0.5811888",
"0.55938053",
"0.5496945",
"0.54101413",
"0.5383944",
"0.53775185",
"0.5357722",
"0.53567284",
"0.5239797",
"0.5234531",
"0.5225566",
"0.5214078",
"0.51944524",
"0.519301",
"0.518485",
"0.51753753",
"0.5170485",
"0.5164536",
"0.51571465",
"0.51501626",
"0.5137739",
"0.51334566",
"0.51262134",
"0.5115311",
"0.51064754",
"0.5098813",
"0.5091624",
"0.50884926",
"0.5081421",
"0.50682795",
"0.50594974",
"0.50434834",
"0.5037525",
"0.50297344",
"0.5028423",
"0.5023521",
"0.5018789",
"0.5016264",
"0.50135154",
"0.4962933",
"0.4952941",
"0.49430007",
"0.49418485",
"0.49318862",
"0.49195525",
"0.4916941",
"0.49118",
"0.49059528",
"0.4903285",
"0.49014002",
"0.49004075",
"0.4898182",
"0.4897292",
"0.48970708",
"0.48814625",
"0.48757312",
"0.4872617",
"0.48683354",
"0.48577273",
"0.4856459",
"0.48560408",
"0.48487934",
"0.48435545",
"0.48334673",
"0.48285645",
"0.48264217",
"0.4826416",
"0.48195502",
"0.48121896",
"0.4809219",
"0.4804936",
"0.4801563",
"0.479978",
"0.4796706",
"0.47950488",
"0.4793235",
"0.4786041",
"0.4774651",
"0.4771593",
"0.4771133",
"0.47674322",
"0.47656006",
"0.475061",
"0.47499788",
"0.47493723",
"0.4748659",
"0.47467467",
"0.47465944",
"0.47419026",
"0.4736884",
"0.4735206",
"0.47347516",
"0.47346509",
"0.47317433",
"0.47300005",
"0.47215748",
"0.47161433",
"0.47121006",
"0.4711942",
"0.4706733"
] |
0.6117542
|
0
|
Validates the received protocol compression parameter and sets the protocol compression to be used in the session according to the result of this validation.
|
def handle_protocol_compression(self, session):
if self.protocol_compression is not None:
valid = RPCS.VALID_COMPRESSION_METHODS
if self.protocol_compression not in valid:
self.protocol_compression = 'NONE'
raise ClientRequestError('InvalidParameterValue', data='protocol_compression')
else:
self.protocol_compression = 'NONE'
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def validate(\n self,\n is_full_msg: bool,\n msg_type: Optional[bytes],\n header_len: int,\n payload_len: Optional[int],\n input_buffer: InputBuffer\n ) -> None:\n\n if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION:\n self._validate_starting_sequence(input_buffer)\n\n if self._size_validation_settings is not None:\n self._validate_payload_length(msg_type, payload_len)\n\n if self._connection_protocol_version >= self.FIRST_VALIDATING_VERSION:\n self._validate_control_flags(is_full_msg, header_len, payload_len, input_buffer)",
"def test_compression_level(self):\n test_compression_level = 8\n self.encoder._compression_level = test_compression_level",
"def setProtocolOptions(self,\n version=None,\n utf8validateIncoming=None,\n acceptMaskedServerFrames=None,\n maskClientFrames=None,\n applyMask=None,\n maxFramePayloadSize=None,\n maxMessagePayloadSize=None,\n autoFragmentSize=None,\n failByDrop=None,\n echoCloseCodeReason=None,\n serverConnectionDropTimeout=None,\n openHandshakeTimeout=None,\n closeHandshakeTimeout=None,\n tcpNoDelay=None,\n perMessageCompressionOffers=None,\n perMessageCompressionAccept=None,\n autoPingInterval=None,\n autoPingTimeout=None,\n autoPingSize=None):",
"def check_codec(self):\n codecs = list(set([s[\"codec\"] for s in self.segments]))\n for c in codecs:\n if c != \"h264\":\n raise P1203StandaloneError(\"Unsupported codec: {}\".format(c))",
"def validate_form_fields(self, protocol):\n\n if protocol and protocol not in [p.get(\"name\") for p in self.protocols]:\n return UNKNOWN_PROTOCOL",
"def check_protocol_version(self):\n try:\n protocol_version = self.do_command(\"protocol_version\")\n except BadGtpResponse:\n return\n if protocol_version != \"2\":\n raise BadGtpResponse(\"%s reports GTP protocol version %s\" %\n (self.name, protocol_version))",
"def _valid_protocol_type(protocol):\n\n if protocol == 'ssh' or protocol == 'https':\n return True\n\n return False",
"def validate(self, options):\n try:\n codecs.getencoder(options.char_encoding)\n except LookupError:\n self.parser.error(\"invalid 'char-encoding' %s\" % options.char_encoding)",
"def validate_required_protocol(dictionary, yaml_file):\n\n validate_dict_contains_value(dictionary, 'defaults', 'protocol', yaml_file)\n validate_type(dictionary['protocol'], 'protocol', str, 'str', yaml_file)\n validate_protocol_type(dictionary, yaml_file)\n del dictionary['protocol']",
"def check_encoding_negotiation(self):\n from .telopt import DO, BINARY\n if self._closing:\n return\n\n # encoding negotiation is complete\n if self.outbinary and self.inbinary:\n self.log.debug('negotiated outbinary and inbinary with client.')\n\n # if (WILL, BINARY) requested by begin_negotiation() is answered in\n # the affirmitive, then request (DO, BINARY) to ensure bi-directional\n # transfer of non-ascii characters.\n elif self.outbinary and not self.inbinary and (\n not (DO, BINARY,) in self.stream.pending_option):\n self.log.debug('outbinary=True, requesting inbinary.')\n self.stream.iac(DO, BINARY)\n self._loop.call_later(self.CONNECT_DEFERRED,\n self.check_encoding_negotiation)\n\n elif self.duration > self.CONNECT_MAXWAIT:\n # Perhaps some IAC interpreting servers do not differentiate\n # 'local' from 'remote' options -- they are treated equivalently.\n self.log.debug('failed to negotiate both outbinary and inbinary.')\n\n else:\n self._loop.call_later(self.CONNECT_DEFERRED,\n self.check_encoding_negotiation)",
"def validate(self) -> None:\n super().validate()\n if self.pipe_mode.value is SocketMode.CONNECT and self.pipe_format.value is None:\n raise Error(\"'pipe_format' required for CONNECT pipe mode.\")",
"def handle_ProtocolHeaderFrame(self,\n frame: amqpframe.ProtocolHeaderFrame):\n self._fsm.trigger('receive_ProtocolHeaderFrame')\n raise exceptions.UnsupportedProtocol(\n frame.payload.protocol_major,\n frame.payload.protocol_minor,\n frame.payload.protocol_revision,\n )",
"def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...",
"def validate(self, protocol, *args, **kwargs):\n assert len(args) >= len(protocol[\"required\"]), \\\n \"Protocol {} has an arity of {}. Called with {}\".format(\n protocol['procedure'], len(protocol[\"required\"]), len(args))\n\n assert len(args) <= len(protocol[\"required\"]) + \\\n len(protocol[\"optional\"]), \\\n \"Protocol {} has an arity of {}. Called with {}\".format(\n protocol['procedure'], len(protocol[\"required\"]), len(args))",
"def fill_protocol(self, data):\n self.protocol = get_optional_value(data, self.PROTOCOL, \"http\")\n self.protocol = self.protocol or \"http\"",
"def validate_backend_version(self):\n pass",
"def load_balance_compression(self, load_balance_compression):\n allowed_values = [\"DO_NOT_COMPRESS\", \"COMPRESS_ATTRIBUTES_ONLY\", \"COMPRESS_ATTRIBUTES_AND_CONTENT\"]\n if load_balance_compression not in allowed_values:\n raise ValueError(\n \"Invalid value for `load_balance_compression` ({0}), must be one of {1}\"\n .format(load_balance_compression, allowed_values)\n )\n\n self._load_balance_compression = load_balance_compression",
"def test_supported_protocol(self):\n assert self.handler.SUPPORTED_PROTOCOL is None",
"def _validate_header(self, cleartext_header, decrypted_header):\n import struct\n\n magic_number1 = struct.unpack(\"!I\", decrypted_header[:4])[0]\n # file_version = struct.unpack(\"!I\", decrypted_header[4:8])[0]\n # key_stretches = struct.unpack(\"!I\", decrypted_header[8:12])[0]\n magic_number2 = struct.unpack(\"!I\", decrypted_header[12:])[0]\n if (self.__magic_number != magic_number1 or\n self.__magic_number != magic_number2):\n raise DecryptionError()\n if cleartext_header != decrypted_header:\n raise FileCorruptionError()",
"def validate_subprotocol(subprotocol, hixie):\n\n if not subprotocol:\n raise HandshakeException('Invalid subprotocol name: empty')\n if hixie:\n # Parameter should be in the range U+0020 to U+007E.\n for c in subprotocol:\n if not 0x20 <= ord(c) <= 0x7e:\n raise HandshakeException(\n 'Illegal character in subprotocol name: %r' % c)\n else:\n # Parameter should be encoded HTTP token.\n state = http_header_util.ParsingState(subprotocol)\n token = http_header_util.consume_token(state)\n rest = http_header_util.peek(state)\n # If |rest| is not None, |subprotocol| is not one token or invalid. If\n # |rest| is None, |token| must not be None because |subprotocol| is\n # concatenation of |token| and |rest| and is not None.\n if rest is not None:\n raise HandshakeException('Invalid non-token string in subprotocol '\n 'name: %r' % rest)",
"def pes_packet_check_formedness(payload):\n b1 = ord(payload[0])\n b2 = ord(payload[1])\n b3 = ord(payload[2])\n\n b4 = ord(payload[3])\n if b1 != 0 or b2 != 0 or b3 != 1:\n return False\n return True",
"def validate(self):\n other = HandshakeSettings()\n\n self._copy_cipher_settings(other)\n self._copy_extension_settings(other)\n self._copy_key_settings(other)\n\n other.pskConfigs = self.pskConfigs\n other.psk_modes = self.psk_modes\n\n if not other.certificateTypes:\n raise ValueError(\"No supported certificate types\")\n\n self._sanityCheckKeySizes(other)\n\n self._sanityCheckPrimitivesNames(other)\n\n self._sanityCheckProtocolVersions(other)\n\n self._sanityCheckExtensions(other)\n\n if other.maxVersion < (3, 3):\n # No sha-2 and AEAD pre TLS 1.2\n other.macNames = [e for e in self.macNames if\n e == \"sha\" or e == \"md5\"]\n\n self._sanityCheckPsks(other)\n\n self._sanityCheckTicketSettings(other)\n\n self._sanity_check_implementations(other)\n self._sanity_check_ciphers(other)\n\n return other",
"def _set_compression_capabilities(self, caps, compression, algorithms=None):\n compression_data = caps.get(\"compression\")\n if compression_data is None:\n msg = \"Compression requested but the server does not support it\"\n if compression == Compression.REQUIRED:\n raise NotSupportedError(msg)\n _LOGGER.warning(msg)\n return None\n\n compression_dict = {}\n if isinstance(compression_data, dict): # C extension is being used\n for fld in compression_data[\"value\"][\"obj\"][\"fld\"]:\n compression_dict[fld[\"key\"]] = [\n value[\"scalar\"][\"v_string\"][\"value\"].decode(\"utf-8\")\n for value in fld[\"value\"][\"array\"][\"value\"]\n ]\n else:\n for fld in compression_data.value.obj.fld:\n compression_dict[fld.key] = [\n value.scalar.v_string.value.decode(\"utf-8\")\n for value in fld.value.array.value\n ]\n\n server_algorithms = compression_dict.get(\"algorithm\", [])\n algorithm = None\n\n # Try to find an algorithm from the requested compression algorithms\n # list, which is supported by the server\n if algorithms:\n # Resolve compression algorithms aliases and ignore unsupported\n client_algorithms = [\n COMPRESSION_ALGORITHMS[item]\n for item in algorithms\n if item in COMPRESSION_ALGORITHMS\n ]\n matched = [item for item in client_algorithms if item in server_algorithms]\n if matched:\n algorithm = COMPRESSION_ALGORITHMS.get(matched[0])\n elif compression == Compression.REQUIRED:\n raise InterfaceError(\n \"The connection compression is set as \"\n \"required, but none of the provided \"\n \"compression algorithms are supported.\"\n )\n else:\n return None # Disable compression\n\n # No compression algorithms list was provided or couldn't found one\n # supported by the server\n if algorithm is None:\n if HAVE_ZSTD and \"zstd_stream\" in server_algorithms:\n algorithm = \"zstd_stream\"\n elif HAVE_LZ4 and \"lz4_message\" in server_algorithms:\n algorithm = \"lz4_message\"\n else:\n algorithm = \"deflate_stream\"\n\n if algorithm not in server_algorithms:\n msg = (\n \"Compression requested but the compression algorithm \"\n \"negotiation failed\"\n )\n if compression == Compression.REQUIRED:\n raise InterfaceError(msg)\n _LOGGER.warning(msg)\n return None\n\n self.protocol.set_capabilities(compression={\"algorithm\": algorithm})\n return algorithm",
"def parse_handshake(self, data):\n\n if (data[0] != len(PSTR) or data[1:20] != PSTR\n or data[28:48] != self.factory.torrent.info_hash):\n\n self.transport.loseConnection()\n else:\n self.handshaked = True\n\n reserved = data[20:28]\n if reserved[7] & ord('\\x04'):\n self.fast_extension = True\n\n if reserved[7] & ord('\\x01'):\n self.dht = True",
"def compare_protocol_versions(self, session):\n # First parse protocol version strings to check for invalid formatting\n invalid_string = self.parse_protocol_version(\n [self.earliest_protocol_version, self.latest_protocol_version])\n if invalid_string is not None:\n # Error during protocol string parsing\n data = ('earliest_protocol_version'\n if invalid_string == self.earliest_protocol_version else 'latest_protocol_version')\n raise ClientRequestError('InvalidParameterValue', data=data)\n\n # Check if protocol version is supported and define the one to use\n self.protocol_version = self.determine_supported_protocol(\n self.earliest_protocol_version, self.latest_protocol_version)",
"def validate_encryption(self) -> bool:\n # Receive the first encrypted message from server\n message = self.receive()\n if message != Message.HI:\n print(\"Encryption error! Closing this socket...\")\n return False\n # Send the first encrypted message\n self.send(Message.HI)\n # Receive the encrypted OK message\n message = self.receive()\n if message == Message.OK:\n print(\"Encryption is established.\")\n return True\n else:\n print(\"Encryption error! Closing this socket...\")\n return False",
"def isProtocolDefined(self) -> bool:\n ...",
"def supports_protocol(self, obj, protocol):\n\n return self.adapt(obj, protocol, None) is not None",
"def protocol_match(cls, script_bytes: bytes) -> bool:\n # fast test -- most ScriptOutputs that aren't SLP will fail here quickly\n if not script_bytes.startswith(cls._protocol_prefix):\n return False\n # fast test passed -- next try the slow test -- attempt to parse and\n # validate OP_RETURN message\n try:\n # raises on parse error\n slf = cls(script_bytes)\n # should always be not None\n if slf.message is not None:\n # save parsed message since likely it will be needed again very soon\n # by class c'tor\n cls._script_message_cache.put(slf.script, slf.message)\n return True\n except Error:\n pass\n except Exception:\n # DEBUG XXX FIXME\n import sys\n import traceback\n\n traceback.print_exc(file=sys.stderr)\n pass\n return False",
"def validate_optional_protocol(dictionary, yaml_file):\n\n if 'protocol' in dictionary:\n validate_type(dictionary['protocol'], 'protocol', str, 'protocol', yaml_file)\n validate_protocol_type(dictionary, yaml_file)\n del dictionary['protocol']",
"def pre_validate_struct(self, struct):\n pass",
"def set_compression(self, compression):\n converter = geowave_pkg.datastore.redis.config.RedisOptions.CompressionConverter()\n self._java_ref.setCompression(converter.convert(compression))",
"def setProtocolOptions(self,\n versions=None,\n webStatus=None,\n utf8validateIncoming=None,\n maskServerFrames=None,\n requireMaskedClientFrames=None,\n applyMask=None,\n maxFramePayloadSize=None,\n maxMessagePayloadSize=None,\n autoFragmentSize=None,\n failByDrop=None,\n echoCloseCodeReason=None,\n openHandshakeTimeout=None,\n closeHandshakeTimeout=None,\n tcpNoDelay=None,\n perMessageCompressionAccept=None,\n autoPingInterval=None,\n autoPingTimeout=None,\n autoPingSize=None,\n serveFlashSocketPolicy=None,\n flashSocketPolicy=None,\n allowedOrigins=None,\n allowNullOrigin=False,\n maxConnections=None,\n trustXForwardedFor=0):",
"def _get_compression(filepath, compression):\n # user has specified \"no compression\"\n if compression is None:\n return None\n # user wants us to infer compression from filepath\n elif compression == \"infer\":\n ext = filepath.suffix\n try:\n return _ext_to_compression[ext.lower()]\n except KeyError:\n return None\n # user has specified compression; validate it\n elif compression in _ext_to_compression.values():\n return compression\n else:\n valid_values = [None, \"infer\"] + sorted(_ext_to_compression.values())\n raise ValueError(\n errors_.value_invalid_msg(\"compression\", compression, valid_values)\n )",
"def validate_encoding(encoding):\n try:\n codecs.lookup(encoding)\n return True\n except LookupError:\n return False",
"def _should_send_binary(self) -> bool:\n if not self.binary_support:\n return False\n\n content_type = self._get_content_type()\n if not content_type.startswith(self.non_binary_content_type_prefixes):\n return True\n\n content_encoding = self._get_content_encoding()\n # Content type is non-binary but the content encoding might be.\n return \"gzip\" in content_encoding.lower()",
"def isGzippable(self, css=0, js=0, REQUEST=None):\n # force: force http compression even if the browser doesn't send an accept\n # debug: return compression state (0: no, 1: yes, 2: force)\n # css: set this to 1 inside a css file (for later use)\n # js: set this to 1 inside a js file (for later use)\n\n if REQUEST is None:\n REQUEST = self.REQUEST\n use_gzip = self.getGzip()\n if not self.getEnabled():\n use_gzip = 'never'\n\n force = 0\n if use_gzip == 'never':\n enable_compression = 0\n elif use_gzip == 'always':\n enable_compression = 1\n force = 1\n elif use_gzip == 'accept-encoding':\n # compress everything except css and js\n enable_compression = 1\n elif use_gzip == 'accept-encoding+user-agent':\n # gzip compatibility info courtesy of\n # http://httpd.apache.org/docs/2.2/mod/mod_deflate.html\n user_agent = REQUEST.get('HTTP_USER_AGENT', '')\n if user_agent.startswith('Mozilla/4'):\n # Netscape 4.x can't handle gzipped css and js\n enable_compression = (css==0 and js==0)\n # Netscape 4.0.6-4.0.8 has some gzip-related bugs\n if user_agent[len('Mozilla/4.')] in ('6','7','8'):\n enable_compression = 0\n # Some versions of MSIE pretend to be Netscape 4.x but are OK with gzipping\n if user_agent.find('MSIE'):\n enable_compression = 1\n\n return (enable_compression, force, REQUEST.get('HTTP_ACCEPT_ENCODING', '').find('gzip') != -1)",
"def _get_mix_encoding(self):\n self._validate_mix_encoding()\n if self.mix_encoding == \"auto\":\n if self._get_mode() == \"Explain\":\n return False\n if self._get_mode() == \"Perform\":\n return False\n if self._get_mode() == \"Compete\":\n return True\n if self._get_mode() == \"Optuna\":\n return False\n else:\n return deepcopy(self.mix_encoding)",
"def validate_algorithm(self, algorithm):\n # type: (Text) -> None\n if not algorithm == self.java_name:\n raise InvalidAlgorithmError(\n 'Requested algorithm \"{requested}\" is not compatible with cipher \"{actual}\"'.format(\n requested=algorithm, actual=self.java_name\n )\n )",
"def SetDataEncoding(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def __process_ethframe(self, eth_header: bytes) -> Dict[str, Any]:\n eth = struct.unpack('!6s6sH', eth_header)\n eth_protocol = socket.ntohs(eth[2])\n\n return {\n 'header_length': Sniffer.ETH_HEADER_LENGTH,\n 'protocol': eth_protocol,\n }",
"def is_compressed(self):\n if self.is_transfer_syntax:\n # Explicit VR Little Endian\n # Implicit VR Little Endian\n # Explicit VR Big Endian\n # Deflated Explicit VR Little Endian\n if self in ['1.2.840.10008.1.2', '1.2.840.10008.1.2.1',\n '1.2.840.10008.1.2.2', '1.2.840.10008.1.2.1.99']:\n return False\n\n # All encapsulated transfer syntaxes\n return True\n\n raise ValueError('UID is not a transfer syntax.')",
"def validate_extension(extension):\n\n error_flag = 0\n error_string = ''\n\n if isinstance(extension, dict):\n try:\n schema = jsonref.load_uri(extension['extension_schema'])\n try:\n print(\"Loaded Extension Schema: \", schema['title'])\n name = schema['title']\n error_string, error_flag = bco_validator(schema, extension)\n\n # For if the schema has no ['title']\n except KeyError:\n print(\"Loaded Extension Schema: \", schema['$id'])\n name = schema['$id']\n\n except json.decoder.JSONDecodeError:\n print('Failed to load extension schema', schema['$id'])\n error_flag += 1\n\n except TypeError:\n print('Failed to load extension schema. \\nInvalid format ', )\n print(extension)\n error_string += json.dumps(extension)\n error_flag += 1\n\n else:\n print('Invalid BCO extension format')\n error_string += json.dumps(extension)\n error_flag = 1\n\n if error_flag == 0:\n print(name + ' PASSED \\U0001F44D')\n return error_string, error_flag",
"def check_compression(ctype, clevel, olevel):\n repository = Repository(archiver.repository_path, exclusive=True)\n with repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n state = None\n while True:\n ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)\n if not ids:\n break\n for id in ids:\n chunk = repository.get(id, read_data=True)\n meta, data = manifest.repo_objs.parse(id, chunk) # will also decompress according to metadata\n m_olevel = meta.get(\"olevel\", -1)\n m_psize = meta.get(\"psize\", -1)\n print(\n hexlify(id).decode(),\n meta[\"ctype\"],\n meta[\"clevel\"],\n meta[\"csize\"],\n meta[\"size\"],\n m_olevel,\n m_psize,\n )\n # this is not as easy as one thinks due to the DecidingCompressor choosing the smallest of\n # (desired compressed, lz4 compressed, not compressed).\n assert meta[\"ctype\"] in (ctype, LZ4.ID, CNONE.ID)\n assert meta[\"clevel\"] in (clevel, 255) # LZ4 and CNONE has level 255\n if olevel != -1: # we expect obfuscation\n assert \"psize\" in meta\n assert m_olevel == olevel\n else:\n assert \"psize\" not in meta\n assert \"olevel\" not in meta",
"def validate_protocol_type(dictionary, yaml_file):\n\n if not _valid_protocol_type(dictionary['protocol']):\n raise ClowderYAMLError(fmt.invalid_protocol_error(dictionary['protocol'], yaml_file))",
"def validate_request(self, data, bounce_back_address):\n error_codes = []\n\n # Check the size of the request\n if len(data) != 6:\n error_codes.append(1)\n\n # Check that the MagicNo field contains 0x497E\n elif ((data[0] << 8) | data[1]) != 0x497E:\n error_codes.append(2)\n\n # Check that the PacketType field contains 0x0001\n elif ((data[2] << 8) | data[3]) != 0x0001:\n error_codes.append(3)\n\n # Check that the RequestType field contains either 0x0001 or 0x0002\n elif ((data[4] << 8) | data[5]) not in cfg.COMMAND_TYPES:\n error_codes.append(4)\n\n if len(error_codes) == 0:\n print(responses.SUCCESS_REQUEST_VALID.format(\n bounce_back_address[0], bounce_back_address[1], data))\n return True\n\n else:\n return False",
"def validate(self, data):\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data",
"def parse_protocol_header(stream: BytesIO) -> Tuple[int, int, int]:\n prefix, *version = unpack('>5sBBB', _read(stream, 8))\n if prefix != b'AMQP\\x00':\n raise ValueError(\"wrong protocol, expected b'AMQP\\x00', got {}\".format(\n prefix\n ))\n return version",
"def post_process(self, packet: 'dict[str, Any]') -> 'MPTCP':\n ret = self.data\n\n ret.option = Enum_Option.Multipath_TCP\n ret.length = self.test['length']\n ret.subtype = Enum_MPTCPOption.get(packet['test']['subtype'])\n\n return ret",
"def supports_protocol(obj, protocol):\n manager = get_global_adaptation_manager()\n return manager.supports_protocol(obj, protocol)",
"def is_encapsulated(self):\n return self.is_compressed",
"def proto_check(proto):\n # Check for TCP\n if proto == 6:\n return 'tcp'\n # Check for UDP\n elif proto == 17:\n return 'udp'\n else:\n return None",
"def process_negotiation_response(self, message: str) -> bool:\n logger.debug(\"Process Negotiation: {}\".format(message))\n\n self.crypto.cipher = message['chosen_algorithms']['symmetric_cipher']\n self.crypto.mode = message['chosen_algorithms']['cipher_mode']\n self.crypto.digest = message['chosen_algorithms']['digest']\n\n logger.info(\"Chosen algorithms: {} {} {}\".format(self.crypto.cipher,self.crypto.mode,self.crypto.digest))",
"def auth_protocol_in(self, auth_protocol_in):\n\n self._auth_protocol_in = auth_protocol_in",
"def validateSSL(self):\n return self.__validate_ssl",
"def _validate_data_size(self, byte_data, byte_size, width, height) -> None:\n expected_size = width * height * self._component_size * self._components\n if byte_size != expected_size:\n raise ValueError(\n f\"Data size {len(byte_data)} does not match expected size {expected_size}\"\n )\n if len(byte_data) != byte_size:\n raise ValueError(\n f\"Data size {len(byte_data)} does not match reported size {expected_size}\"\n )",
"def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True",
"def _validatePortConfig(self):\n if config.BindHTTPPorts:\n if config.HTTPPort == 0:\n raise UsageError(\n \"HTTPPort required if BindHTTPPorts is not empty\"\n )\n elif config.HTTPPort != 0:\n config.BindHTTPPorts = [config.HTTPPort]\n if config.BindSSLPorts:\n if config.SSLPort == 0:\n raise UsageError(\n \"SSLPort required if BindSSLPorts is not empty\"\n )\n elif config.SSLPort != 0:\n config.BindSSLPorts = [config.SSLPort]",
"def validate_frame_length(frame_length, algorithm):\n if frame_length < 0 or frame_length % algorithm.encryption_algorithm.block_size != 0:\n raise SerializationError(\n \"Frame size must be a non-negative multiple of the block size of the crypto algorithm: {block_size}\".format(\n block_size=algorithm.encryption_algorithm.block_size\n )\n )\n if frame_length > aws_encryption_sdk.internal.defaults.MAX_FRAME_SIZE:\n raise SerializationError(\n \"Frame size too large: {frame} > {max}\".format(\n frame=frame_length, max=aws_encryption_sdk.internal.defaults.MAX_FRAME_SIZE\n )\n )",
"def preprocess_body(self) -> None:\n self._verify_archive_url_and_zip_path()\n self._verify_upload_url_and_zip_path()\n self._verify_upload_url_and_no_zip_path()\n if self.upload_function is None:\n self.upload_function = False",
"def __get_verify_mode(self):\n ...",
"def _check_vals(self):\n\n try:\n self.is_set = True\n self.pack()\n except Exception as err:\n # Set default values again\n raise ValueError(\"Invalid arguments. Could not packed since: {}\".format(err))\n self.__init__()",
"def _value_to_stored(value, min_compress_len):\n\n\t\tflags = 0\n\t\tif isinstance(value, str):\n\t\t\tpass\n\t\telif isinstance(value, int):\n\t\t\tflags |= Client._FLAG_INTEGER\n\t\t\tvalue = \"%d\" % value\n\t\t\t# Don't try to compress it\n\t\t\tmin_compress_len = 0\n\t\telif isinstance(value, long):\n\t\t\tflags |= Client._FLAG_LONG\n\t\t\tvalue = \"%d\" % value\n\t\t\t# Don't try to compress it\n\t\t\tmin_compress_len = 0\n\t\telse:\n\t\t\tflags |= Client._FLAG_PICKLE\n\t\t\tvalue = pickle.dumps(value, 0)\n\n\t\t# silently do not store if value length exceeds maximum\n\t\tif len(value) >= SERVER_MAX_VALUE_LENGTH:\n\t\t\treturn None\n\n\t\tif min_compress_len and _SUPPORTS_COMPRESS and len(value) > min_compress_len:\n\t\t\t# Try compressing\n\t\t\tcompressed_value = compress(value)\n\n\t\t\t#Only retain the result if the compression result is smaller than the original.\n\t\t\tif len(compressed_value) < len(value):\n\t\t\t\tflags |= Client._FLAG_COMPRESSED\n\t\t\t\tvalue = compressed_value\n\n\t\treturn flags, value",
"def validate_http_request(request):\r\n if request != b'':\r\n # Divide the request line: [method, sp, url, version, cr lf]\r\n request = request.decode().split('\\r')[0]\r\n method = request.split()[0]\r\n url = request.split()[1]\r\n version = request.split()[2]\r\n if method == METHOD and version == VERSION:\r\n return True, url\r\n else:\r\n return False, None\r\n else:\r\n return True, None",
"def _check_parameter(self, data):\n return self._pre_process_record(data) is not None",
"def test_verify(self):\n self.encoder._verify = True\n self.assertTrue(self.encoder._verify)",
"def verify_payload():\n return True",
"def parse_valid(self):\n try:\n self.test_proto.parse()\n except avro.errors.ProtocolParseException: # pragma: no coverage\n self.fail(f\"Valid protocol failed to parse: {self.test_proto!s}\")",
"def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )",
"def set_version(self, protocol_version):\n self.version = protocol_version\n self.version_bytes = str(protocol_version).encode(\"latin1\")\n self.version_header = self.version_bytes + PROTOCOL_3x_HEADER\n if protocol_version == 3.2: # 3.2 behaves like 3.3 with type_0d\n # self.version = 3.3\n self.dev_type = \"type_0d\"\n elif protocol_version == 3.4:\n self.dev_type = \"v3.4\"",
"def _validate_input(self):\n self.data.validate()\n self.meta_hybridizer.validate_input()",
"def test_gzip(handler,config):\r\n if not config.gzip:\r\n return False\r\n if not gzip_support:\r\n return False\r\n accept_encoding = handler.headers.get('accept-encoding','').split(',')\r\n accept_encoding = [ x.strip() for x in accept_encoding ]\r\n ctype = handler.resp_headers[\"Content-type\"]\r\n # if gzip is supported by the user agent,\r\n # and if the option gzip in the configuration file is set, \r\n # and content type is text/ or javascript, \r\n # set Content-Encoding to 'gzip' and return True\r\n if 'gzip' in accept_encoding and \\\r\n ctype and (ctype.startswith('text/') or \r\n ctype=='application/x-javascript'):\r\n return True\r\n return False",
"def detect_encoding(self):\n if hasattr(self, '_detect_encoding'):\n return self._detect_encoding\n\n if self.data:\n self._detect_encoding = charlockholmes.detect(self.data)\n return self._detect_encoding",
"def decoder(self, contentType, decoder):\n pass",
"def validate_signature(self):\n return self.signature == 0xAA55",
"def validate_config(config: NeedlemanWunschAlgorithmConfig):\n\n parameters_names_list = [\"SAME\", \"DIFF\", \"GAP_PENALTY\", \"MAX_NUMBER_PATHS\", \"MAX_SEQ_LENGTH\"]\n\n for param_name in parameters_names_list:\n if not isinstance(config[param_name], int):\n return False, f\"Parameter {param_name} is not int!\"\n \n for param_name in parameters_names_list[0:3]:\n if config[param_name] == 0:\n return False, f\"Parameter {param_name} can not be equal to 0!\"\n\n for param_name in parameters_names_list[3:]:\n if config[param_name] < 1:\n return False, f\"Parameter {param_name} can not be less than 1!\"\n\n if config.SAME <= config.DIFF:\n return False, f\"Parameter SAME must be greater than parameter DIFF!\"\n\n if config.MAX_SEQ_LENGTH > constants.MAXIMUM_SEQ_LEN:\n return False, f\"Value of parameter MAX_SEQ_LENGTH is too big. It should be less than {constants.MAXIMUM_SEQ_LEN}\"\n\n if config.MAX_NUMBER_PATHS > constants.MAXIMUM_NUMBER_PATHS:\n return False, f\"Value of parameter MAX_NUMBER_PATHS is too big. It should be less than {constants.MAXIMUM_NUMBER_PATHS}\"\n\n return True, \"\"",
"def auth_protocol(self, auth_protocol):\n\n self._auth_protocol = auth_protocol",
"def _validate_format(self, full_encrypted_value, **options):\n\n if not self.FORMAT_REGEX.match(full_encrypted_value):\n raise InvalidEncryptedValueError('Input value is not a valid '\n '[{current}] encryption value.'\n .format(current=self._get_algorithm()))",
"def test_no_compress_compressed_response(self):\n self.resp[\"Content-Encoding\"] = \"deflate\"\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(r.content, self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"deflate\")",
"def protocol_in(self, protocol_in):\n\n self._protocol_in = protocol_in",
"def __init__(self, tb=None,\n #\n RDY_ma_start_compression=0,\n RDY_ma_get_input=0,\n mav_send_compressed_value=0,\n RDY_mav_send_compressed_value=0,\n RDY_ma_end_compression=0,\n mv_compression_done=0,\n RDY_mv_compression_done=0,\n ):\n#\n if RDY_ma_start_compression is not None and isinstance(RDY_ma_start_compression, int):\n RDY_ma_start_compression = BinaryValue(\n RDY_ma_start_compression, tb.RDY_bits, False)\n if RDY_ma_get_input is not None and isinstance(RDY_ma_get_input, int):\n RDY_ma_get_input = BinaryValue(\n RDY_ma_get_input, tb.RDY_bits, False)\n if mav_send_compressed_value is not None and isinstance(mav_send_compressed_value, int):\n mav_send_compressed_value = BinaryValue(\n mav_send_compressed_value, tb.output_bits, False)\n if RDY_mav_send_compressed_value is not None and isinstance(RDY_mav_send_compressed_value, int):\n RDY_mav_send_compressed_value = BinaryValue(\n RDY_mav_send_compressed_value, tb.RDY_bits, False)\n if RDY_ma_end_compression is not None and isinstance(RDY_ma_end_compression, int):\n RDY_ma_end_compression = BinaryValue(\n RDY_ma_end_compression, tb.RDY_bits, False)\n if mv_compression_done is not None and isinstance(mv_compression_done, int):\n mv_compression_done = BinaryValue(\n mv_compression_done, tb.bool_bits, False)\n if RDY_mv_compression_done is not None and isinstance(RDY_mv_compression_done, int):\n RDY_mv_compression_done = BinaryValue(\n RDY_mv_compression_done, tb.RDY_bits, False)\n#\n self.value = (\n RDY_ma_start_compression,\n RDY_ma_get_input,\n mav_send_compressed_value,\n RDY_mav_send_compressed_value,\n RDY_ma_end_compression,\n mv_compression_done,\n RDY_mv_compression_done\n )",
"def checkSize(self, package):\n\n if len(package) > self.MAX_LENGTH:\n package = pickle.dumps(Failure(protocols.MessageSizeError()),2)\n return package",
"def test_restricted_to_protocols_updated(self):\n assert self.connection_config.restricted_to_protocols == {self.new_protocol_id}",
"def validate(self, params: Dict = None) -> None:\n masking_char = params.get(self.MASKING_CHAR)\n validate_parameter(masking_char, self.MASKING_CHAR, str)\n if len(masking_char) > 1:\n raise InvalidParamException(\n f\"Invalid input, {self.MASKING_CHAR} must be a character\"\n )\n\n validate_parameter(params.get(self.CHARS_TO_MASK), self.CHARS_TO_MASK, int)\n validate_parameter(params.get(self.FROM_END), self.FROM_END, bool)",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def __init__(self):\n # self.baud = baud\n # self.protocol = protocol\n pass",
"def test_op_no_compression(self):\n assert OP_NO_COMPRESSION == 0x20000",
"def _process_content_codings(self, chunk):\n content_codings = self.parsed_headers.get('content-encoding', [])\n content_codings.reverse()\n for coding in content_codings:\n # TODO: deflate support\n if coding in ['gzip', 'x-gzip'] and self._decode_ok:\n if not self._in_gzip_body:\n self._gzip_header_buffer += chunk\n try:\n chunk = self._read_gzip_header(\n self._gzip_header_buffer\n )\n self._in_gzip_body = True\n except IndexError:\n return '' # not a full header yet\n except IOError, gzip_error:\n self.add_note('header-content-encoding',\n rs.BAD_GZIP,\n gzip_error=str(gzip_error)\n )\n self._decode_ok = False\n return\n try:\n chunk = self._gzip_processor.decompress(chunk)\n except zlib.error, zlib_error:\n self.add_note(\n 'header-content-encoding', \n rs.BAD_ZLIB,\n zlib_error=str(zlib_error),\n ok_zlib_len=f_num(self.payload_sample[-1][0]),\n chunk_sample=chunk[:20].encode('string_escape')\n )\n self._decode_ok = False\n return\n else:\n # we can't handle other codecs, so punt on body processing.\n self._decode_ok = False\n return\n self._md5_post_processor.update(chunk)\n self.decoded_len += len(chunk)\n return chunk",
"def post_validate_struct(self, struct):\n pass",
"def after_encoding_negotiation(self, status):\n if status.cancelled():\n self.log.debug('encoding negotiation cancelled')\n return",
"def _validate_http_request(self):\n if self.path != '/':\n print('Invalid request path:', self.path)\n self.send_error(HTTPStatus.NOT_FOUND, 'Request Must Have Path Of /')\n raise ValueError\n\n content_type = self.headers.get('Content-Type', None)\n if content_type != 'application/json':\n print('Invalid request Content-Type:', self.path)\n self.send_error(HTTPStatus.BAD_REQUEST, 'Content-Type Must Be application/json')\n raise ValueError",
"def check_encoder(self):\n file = Path(self.get_encoder_path() + \".data-00000-of-00001\")\n\n return file.exists()",
"def _validate(self, value):\n return True",
"def _check_paramindevice(self):\n if self._check_validparam(self.symbol):\n param = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '=' is used\n if self._is_equal(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if value is valid\n if self._is_number(self.symbol):\n value = self.symbol\n self.symbol = self.scanner.get_symbol()\n return param, value\n else:\n # The parameter value is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '='\n self._display_syntax_error(\"equal\")\n self._semicolon_skipper()\n return None, None\n else:\n # The parameter type is not valid\n self._display_syntax_error(\"parameter\")\n self._semicolon_skipper()\n return None, None",
"def _validate_ens_net_portsecurity(self, net_data):\n pass",
"def __init__(self, protocol):\r\n self._protocol = protocol",
"def transportprotocol(self, transportprotocol) :\n\t\ttry :\n\t\t\tself._transportprotocol = transportprotocol\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_invalid_blocksize(self):\n self.default_kwargs['blocksize'] = 1000000\n self.encoder = StreamEncoder(**self.default_kwargs)\n with self.assertRaisesRegex(EncoderInitException, 'FLAC__STREAM_ENCODER_INIT_STATUS_INVALID_BLOCK_SIZE'):\n self.encoder._init()",
"def _decode(self):\n \n self.version = int(data_to_hex_str(self.packet[0])[2])\n self.header_len = int(data_to_hex_str(self.packet[0])[3]) * 4\n self.type_of_service = data_to_hex_str(self.packet[1:2])\n self.total_len = int(data_to_hex_str(self.packet[2:4]), 16)\n self.id = data_to_hex_str(self.packet[4:6])\n \n #parse the flags fields(reservedbit, don't fragment, more fragment)\n if ((ord(self.packet[6]) & (1 << 7)) != 0):\n self.flags_reservedbit = 1\n else:\n self.flags_reservedbit = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 6)) != 0):\n self.flags_dont_fragment = 1\n else:\n self.flags_dont_fragment = 0\n #endof if\n \n if ((ord(self.packet[6]) & (1 << 5)) != 0):\n self.flags_more_fragment = 1\n else:\n self.flags_more_fragment = 0\n #endof if\n \n #parse the offset field(in packet[6:7]): 00011111 & packet[6] (to filter flags) -->> get packet[6:7] in hex_str\n #tmp = str(31 & ord(self.packet[6]))\n self.fragment_offset = int(data_to_hex_str(self.packet[6:8]), 16)\n if (self.fragment_offset >= (1 << 13)):\n #take away the flags fields: 00011111 11111111 & self.fragment_offset\n self.fragment_offset = self.fragment_offset & ((1 << 13) - 1) \n \n self.TTL = ord(self.packet[8])\n self.protocol = IPPROTO[ord(self.packet[9])]\n self.header_checksum = data_to_hex_str(self.packet[10:12])\n \n self.src = str(ord(self.packet[12])) + '.' + str(ord(self.packet[13])) + '.' + \\\n str(ord(self.packet[14])) + '.' + str(ord(self.packet[15]))\n self.dst = str(ord(self.packet[16])) + '.' + str(ord(self.packet[17])) + '.' + \\\n str(ord(self.packet[18])) + '.' + str(ord(self.packet[19]))\n \n if (self.header_len > 20):\n self.opt_paddings = self.packet[20 : (self.header_len)]"
] |
[
"0.51522434",
"0.513435",
"0.5105154",
"0.50020367",
"0.4987997",
"0.49288347",
"0.48937827",
"0.48890725",
"0.48640823",
"0.48285913",
"0.48197266",
"0.47871244",
"0.47853112",
"0.47634792",
"0.4761809",
"0.47489363",
"0.47361204",
"0.47114277",
"0.4680672",
"0.46734345",
"0.46563947",
"0.46507746",
"0.46429485",
"0.46197894",
"0.46024603",
"0.45702195",
"0.453306",
"0.45139205",
"0.4506355",
"0.4492941",
"0.4487423",
"0.4470945",
"0.44659147",
"0.44584614",
"0.44333625",
"0.44320863",
"0.4426356",
"0.44258633",
"0.44190878",
"0.44190124",
"0.44172466",
"0.44125426",
"0.4410112",
"0.44014096",
"0.4395469",
"0.43852672",
"0.43792933",
"0.43786404",
"0.43782446",
"0.4367582",
"0.43657926",
"0.43484995",
"0.43407035",
"0.43377995",
"0.43319732",
"0.4328108",
"0.43250978",
"0.4323297",
"0.4321647",
"0.43181577",
"0.43167442",
"0.4316235",
"0.43148217",
"0.43107834",
"0.43097886",
"0.4309296",
"0.43082756",
"0.43016624",
"0.4297396",
"0.4292933",
"0.4286779",
"0.42766225",
"0.42759028",
"0.42742422",
"0.42741093",
"0.42723596",
"0.42722693",
"0.42684245",
"0.4266035",
"0.42588",
"0.4248089",
"0.42477784",
"0.4246729",
"0.4244027",
"0.424275",
"0.424275",
"0.42419854",
"0.42412946",
"0.42387804",
"0.4233395",
"0.42301905",
"0.42282668",
"0.42255363",
"0.42245635",
"0.42220286",
"0.4220503",
"0.42181623",
"0.4217412",
"0.4217098",
"0.42125902"
] |
0.73079675
|
0
|
Handle a request from the device
|
def handle_inform(self, session, request):
    # Verify the parameters
    params = get_element('params', request)
    # Default value for protocol_compression
    protocol_compression = 'NONE'
    response = {}
    if params is not None:
        try:
            # Fetch inform parameters and load into session
            self.fetch_inform_params(session, params)
            # Parse provided protocol version parameters and check validity
            self.compare_protocol_versions(session)
            # If a protocol_compression method is provided, check if it is valid
            self.handle_protocol_compression(session)
            # Handle a possible subscriber id (MACless communication)
            self.handle_client_id(session)
            # Validate and check the reason (event) for this session
            self.handle_connection_event(session)
            # Check for unknown parameters provided in RPC
            for key in params:
                if key not in RPCS.VALID_INFORM_PARAMETERS:
                    raise ClientRequestError("InvalidParameterName", data=key)
        except ClientRequestError as inform_error:
            next_state = inform_error.error['next_state']
            error_message = {"error": inform_error.error['error']}
            if inform_error.error_name == "InvalidClientId":
                # As defined in the protocol: log in database
                session['log'] = {'rc': 'error', 'msg': 'Invalid client_id value'}
            LOG.debug("ExpectInform Error: " + str(inform_error))
            return next_state, error_message
        except DbException:
            return (RPCS.ExpectInform, {
                'error': {'code': -31997,
                          'message': 'Database access error'}})
        # Everything is OK with Inform RPC
        next_state = RPCS.ExpectRpc
        response['result'] = {
            'protocol_version': self.protocol_version,
            'protocol_compression': protocol_compression
        }
    # No parameters provided with inform RPC
    else:
        next_state = RPCS.ExpectInform
        response['error'] = {
            'code': -32602, 'message': 'Invalid parameter'}
    return next_state, response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)",
"async def handle_request(self, request: aioweb.request.Request):",
"def handle_request(self, request: HttpParser) -> None:\n raise NotImplementedError() # pragma: no cover",
"def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)",
"def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif 
request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n 
pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n #Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n 
#Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event with data: \" + str(self.data))",
"def handle_request(self,host,path,data=b''):\n\t\tself.response_code(5,\"Request handler not implemented\")",
"def serve(self, rq):\n # Call callback by key directly from socket\n request = rq['request']\n\n if request in self.callbacks :\n self.callbacks[request](rq)\n else :\n print \"unrecognised request\"",
"def serve(self, rq):\n # Call callback by key directly from socket\n request = rq['request']\n\n if request in self.callbacks :\n self.callbacks[request](rq)\n else :\n print \"unrecognised request\"",
"def process_request(self, request):\n self.req = request\n command = self.get_command()\n file_handler = filehandler.FileHandler(command)\n file_handler.handle_command()\n return command.result",
"def HandleRequest(self):\n rmsg = dm.DeviceManagementRequest()\n length = int(self.headers.getheader('content-length'))\n rmsg.ParseFromString(self.rfile.read(length))\n\n logging.debug('gaia auth token -> ' +\n self.headers.getheader('Authorization', ''))\n logging.debug('oauth token -> ' + str(self.GetUniqueParam('oauth_token')))\n logging.debug('deviceid -> ' + str(self.GetUniqueParam('deviceid')))\n self.DumpMessage('Request', rmsg)\n\n request_type = self.GetUniqueParam('request')\n # Check server side requirements, as defined in\n # device_management_backend.proto.\n if (self.GetUniqueParam('devicetype') != '2' or\n self.GetUniqueParam('apptype') != 'Chrome' or\n (self.GetUniqueParam('deviceid') is not None and\n len(self.GetUniqueParam('deviceid')) >= 64)):\n return (400, 'Invalid request parameter')\n\n expected_error = self.GetExpectedError(request_type)\n if expected_error:\n return expected_error\n\n if request_type == 'register':\n response = self.ProcessRegister(rmsg.register_request)\n elif request_type == 'certificate_based_register':\n response = self.ProcessCertBasedRegister(\n rmsg.certificate_based_register_request)\n elif request_type == 'api_authorization':\n response = self.ProcessApiAuthorization(rmsg.service_api_access_request)\n elif request_type == 'unregister':\n response = self.ProcessUnregister(rmsg.unregister_request)\n elif request_type == 'policy':\n response = self.ProcessPolicy(rmsg, request_type)\n elif request_type == 'enterprise_check':\n response = self.ProcessAutoEnrollment(rmsg.auto_enrollment_request)\n elif request_type == 'device_state_retrieval':\n response = self.ProcessDeviceStateRetrievalRequest(\n rmsg.device_state_retrieval_request)\n elif request_type == 'status_upload':\n response = self.ProcessStatusUploadRequest(\n rmsg.device_status_report_request, rmsg.session_status_report_request)\n elif request_type == 'device_attribute_update_permission':\n response = self.ProcessDeviceAttributeUpdatePermissionRequest()\n elif request_type == 'device_attribute_update':\n response = self.ProcessDeviceAttributeUpdateRequest()\n elif request_type == 'check_device_license':\n response = self.ProcessCheckDeviceLicenseRequest()\n elif request_type == 'remote_commands':\n response = self.ProcessRemoteCommandsRequest()\n elif request_type == 'check_android_management':\n response = self.ProcessCheckAndroidManagementRequest(\n rmsg.check_android_management_request,\n str(self.GetUniqueParam('oauth_token')))\n elif request_type == 'register_browser':\n response = self.ProcessRegisterBrowserRequest(\n rmsg.register_browser_request)\n elif request_type == 'chrome_desktop_report':\n response = self.ProcessChromeDesktopReportUploadRequest(\n rmsg.chrome_desktop_report_request)\n elif request_type == 'app_install_report':\n response = self.ProcessAppInstallReportRequest(\n rmsg.app_install_report_request)\n else:\n return (400, 'Invalid request parameter')\n\n if isinstance(response[1], basestring):\n body = response[1]\n elif isinstance(response[1], google.protobuf.message.Message):\n self.DumpMessage('Response', response[1])\n body = response[1].SerializeToString()\n else:\n body = ''\n return (response[0], body)",
"def handle_request(self, data, peer):\n\n req = Request.parse(data)\n if req:\n handler = self.get_hanlder(\n req, (self.host, self.port), peer, \n self._retries, self._timeout\n )\n handler.run()",
"def handle(self):\n\t\ttry:\n\t\t\trequest_line = self.rfile.readline().decode(\"ascii\")\n\t\t\tassert request_line.endswith(\"\\r\\n\"), \"Request line must end in CRLF\"\n\t\t\tparts = request_line.strip().split()\n\t\t\tassert len(parts)==3, \"Invalid request line\"\n\t\t\thost, path, content_length = parts\n\t\t\tif (content_length:=int(content_length))>0:\n\t\t\t\tdata = self.rfile.read(content_length)\n\t\t\telse:\n\t\t\t\tdata = b''\n\t\t\tself.handle_request(host,path,data)\n\t\texcept AssertionError as e:\n\t\t\tself.response_code(4,e.args[0])",
"async def _handle_request(self, request: web.Request) -> web.Response:\n event = await request.json()\n # This handler will be called on the server thread. Call the external\n # handler on the app thread.\n self._main_loop.call_soon_threadsafe(self.handle_event, event)\n return web.Response(text=\"OK\")",
"async def rest_handler(request):\n # verify the request\n valid, reason = await verify_rest_request(request)\n if not valid:\n return generate_error(reason, 400)\n json = await request.json()\n # get the parameters\n cmd = json['cmd']\n params = json['params']\n # pass off to the correct target handler\n if cmd == 'find':\n response = await _find_handler(request, params)\n elif cmd == 'stats':\n response = await _stats_handler(request, params)\n elif cmd == 'download':\n response = await _download_handler(request, params)\n elif cmd == 'upload':\n response = await _upload_handler(request, params)\n elif cmd == 'provision':\n response = await _provision_handler(request, params)\n # return the response we get back fgrom the handler\n return response",
"def handle_request(self):\n\t\ttry:\n\t\t\tr,w,e=select.select([self.socket],[],[], 1.0)\n\t\t\tif not r:\n\t\t\t\treturn\n\t\t\trequest, client_address=self.socket.accept()\n\t\texcept:\n\t\t\treturn\t\t\n\t\t\n\t\ttry:\n\t\t\tif self.debug:\n\t\t\t\tprint \"got request\"\n\t\t\tself.process_request(request, client_address)\n\t\texcept:\n\t\t\tself.handle_error(request, client_address)",
"def handle(req):\n return logic(req)",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))",
"def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))",
"def handle_request(self, request):\n return self._first_handler(self, request)",
"def handle_request(self, query, request):\r\n request_pdu = None\r\n response_pdu = \"\"\r\n slave_id = None\r\n function_code = None\r\n func_code = None\r\n slave = None\r\n response = None\r\n\r\n try:\r\n # extract the pdu and the slave id\r\n slave_id, request_pdu = query.parse_request(request)\r\n if len(request_pdu) > 0:\r\n (func_code, ) = struct.unpack(\">B\", request_pdu[0])\r\n # 43 is Device Information\r\n if func_code == 43:\r\n # except will throw MissingKeyError\r\n slave = self.get_slave(slave_id)\r\n response_pdu = slave.handle_request(request_pdu)\r\n # make the full response\r\n response = query.build_response(response_pdu)\r\n # get the slave and let him execute the action\r\n elif slave_id == 0:\r\n # broadcast\r\n for key in self._slaves:\r\n response_pdu = self._slaves[key].handle_request(request_pdu, broadcast=True)\r\n response = query.build_response(response_pdu)\r\n elif slave_id == 255:\r\n r = struct.pack(\">BB\", func_code + 0x80, 0x0B)\r\n response = query.build_response(r)\r\n else:\r\n slave = self.get_slave(slave_id)\r\n response_pdu = slave.handle_request(request_pdu)\r\n # make the full response\r\n response = query.build_response(response_pdu)\r\n except (IOError, MissingKeyError) as e:\r\n # If the request was not handled correctly, return a server error response\r\n r = struct.pack(\">BB\", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE)\r\n response = query.build_response(r)\r\n\r\n if slave:\r\n function_code = slave.function_code\r\n\r\n return (response, {'request': request_pdu.encode('hex'),\r\n 'slave_id': slave_id,\r\n 'function_code': function_code,\r\n 'response': response_pdu.encode('hex')})",
"def request_handler(self, client_connection):\n request = client_connection.recv(1024)\n\n #Make sure we recieved some data before proceeding\n if not request:\n response = 'Empty request'\n http_code = 400\n else:\n response, http_code = self.parse_request(request)\n\n #print response\n self.send_response(client_connection, response, http_code)",
"def handle(self):\n # Since we only expect small packets, we don't need to read a buffer of\n # size, say, 4096.\n data = self.request.recv(64)\n\n logging.debug(\"Received '%s' from '%s'\", repr(data), self.client_address[0])\n\n if data[0:2] == \"\\x01\\x00\":\n # Authentication attempt. Since this address is whitelisted (as it\n # was successfully verified by the server in `verify_request`), we\n # can safely acknowledge the authentication.\n client = None\n\n try:\n client = self.server.authenticator.add_new_client(\n self.client_address[0],\n data.split(\"\\x00\")[1]\n )\n\n except:\n logging.exception(\"An exception occurred creating a new client\")\n return\n\n\n self.server.storage_manager.add_client(client)\n\n self.request.send(\"\\x01\\x00%s\\x00%s\\x00%s\\x00\" % (\n client.token,\n self.server.receiver_address[0],\n self.server.receiver_address[1]\n )\n )\n\n else:\n logging.info(\"Unknown request '%s' from '%s'\", \n data, self.client_address[0])\n\n self.request.send(\"\")",
"def process_request(self, req, resp, resource, params):",
"def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n self.mapInterface.router[cmd_strn])",
"def handler(self):\n\t\treturn self.handle_request",
"def request(self, msg):\n\t\tif msg.command in ('AUTH', 'EXIT', 'GET', 'SET', 'VERSION', 'COMMAND', 'UPLOAD'):\n\t\t\tmethod = 'handle_request_%s' % (msg.command.lower(),)\n\t\telse:\n\t\t\tmethod = 'handle_request_unknown'\n\n\t\tself.execute(method, msg)",
"def handle_request( method, uri, version, headers, body=None, chunk=None,\n trailers=None ):",
"def handle_one_request(self):\n import socket\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(414)\n return\n if not self.raw_requestline:\n self.close_connection = 1\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n\n ##### Customization\n # origin\n \"\"\"\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mname)\n method()\n \"\"\"\n # now\n #import pdb; pdb.set_trace()\n self.delegate(self.get_environ(), self.gen_response, self.send_error)\n\n self.wfile.flush() #actually send the response if not already done.\n except socket.timeout, e:\n #a read or a write timed out. Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return",
"def _process_request(self, request, response):\n ...",
"def handle(self, request):\n\t\tCORE.info('Incoming request of type %s' % (request.command,))\n\t\tif not self.authenticated and request.command != 'AUTH':\n\t\t\tself.request(request)\n\t\telif request.command == 'AUTH':\n\t\t\tfrom univention.management.console.protocol.server import Server\n\t\t\tServer.reload()\n\t\t\ttry:\n\t\t\t\tself.__auth.authenticate(request)\n\t\t\texcept (TypeError, KeyError):\n\t\t\t\tresponse = Response(request)\n\t\t\t\tresponse.status = 400\n\t\t\t\tself._response(response)\n\t\telif request.command == 'GET' and 'newsession' in request.arguments:\n\t\t\tCORE.info('Renewing session')\n\t\t\tif self.processor:\n\t\t\t\tself.__locale = str(self.processor.locale)\n\t\t\tself.processor = None\n\t\t\tself.finished(request.id, None)\n\t\telse:\n\t\t\tself.initalize_processor(request)\n\t\t\tself.processor.request(request)",
"def handleRequest(self, access_id, msg):\n log.msg('handling engine request for %s' % access_id)\n try:\n engine_client = yield self.backend.getEngine(access_id)\n log.msg('got engine Client %s' % str(engine_client))\n except InvalidAccessId:\n err = {'status':'ERR', 'response':'InvalidAccessId'}\n log.err('InvalidAccessId %s' % access_id)\n defer.returnValue(err)\n\n result = yield engine_client.send(msg)\n sucs = {'status':'OK', 'response':result}\n defer.returnValue(sucs)",
"def handle_one_request(self):\n \n try:\n \n self.raw_requestline = self.rfile.readline(65537)\n \n if len(self.raw_requestline) > 65536:\n \n self.requestline = ''\n \n self.request_version = ''\n \n self.command = ''\n \n self.send_error(414)\n \n return\n \n if not self.raw_requestline:\n \n self.close_connection = 1\n \n return\n \n if not self.parse_request():\n \n # An error code has been sent, just exit\n \n return\n \n mname = 'do_' + self.command\n \n if not hasattr(self, mname):\n \n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n \n return\n \n method = getattr(self, mname)\n \n print \"before call do_Get\"\n \n method()\n \n #增加 debug info 及 wfile 判断是否已经 close\n \n print \"after call do_Get\"\n \n if not self.wfile.closed:\n self.wfile.flush() #actually send the response if not already done.\n \n print \"after wfile.flush()\"\n \n except socket.timeout, e:\n \n #a read or a write timed out. Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return",
"def process_request(self, req):\n return None",
"def __handle_pkt_request(self, current_client, data):\n request_type = int(data.request_type)\n\n try:\n if data.data:\n del data.data['_io']\n self.__requests_manager.dispatch_request_handler(current_client, request_type, **data.data)\n else:\n self.__requests_manager.dispatch_request_handler(current_client, request_type)\n except RevetherServerErrorWithCode as e:\n # Update the client about the failure\n current_client.send_pkt(create_request_packet(e.code))\n self.__logger.warning(\"Error while handling client request: {}\".format(e))",
"def process_request(self, request):\n raise NotImplementedError('process_request not implemented in BaseService')",
"def handle_request(self, path=None):\n req = get_request()\n resp = super().handle_request(req)\n return to_response(resp)",
"def _handle_request(self, info, desired=None):\r\n debug_print('%s request:' % info.name)\r\n\r\n editor = info.editor\r\n if ((not editor.is_python_like())\r\n or sourcecode.is_keyword(info.obj)\r\n or editor.in_comment_or_string()):\r\n desired = 'fallback'\r\n\r\n self.pending = (info, desired)\r\n if not self.busy:\r\n self._handle_pending()",
"def process_request(self, req, resp):\n if req.method == \"GET\" or req.method == \"POST\":\n log.info((thisFilename, inspect.currentframe().f_code.co_name, req.path, \"params\", str(req.params)))\n if req.method == \"POST\":\n log.info((thisFilename, inspect.currentframe().f_code.co_name, req.path, \"media\", str(req.media)))\n if req.method != \"OPTIONS\":\n if req.path not in ignoreProcessRequestForPath:\n if \"kartoon-fapi-incoming\" not in req.params:\n resp.media = {\n \"responseId\": 103,\n \"message\": \"Invalid request\"\n }\n # exit request\n resp.complete = True\n else:\n req.params[\"kartoon-fapi-incoming\"] = json.loads(req.params[\"kartoon-fapi-incoming\"])\n if req.params[\"kartoon-fapi-incoming\"][\"secretKey\"] != FapiToBapiSecret:\n resp.media = {\n \"responseId\": 109,\n \"message\": \"Unauthorized access\"\n }\n # exit request\n resp.complete = True",
"def process_request(self, req, resp):\n pass\n \"\"\"TODO: to be defined1.\n\n :Process the request before routing it.\n\n Args:: TODO\n\n \"\"\"\n # self._Process the request before routing it.\n\n # Args: = Process the request before routing it.\n\n # Args:",
"async def post(self):\n\n # decrypt the request\n payload = decrypt_message(self.request.body, self.fernet_secret)\n if not payload:\n raise tornado.web.HTTPError(status_code=401)\n\n # ignore all requests for echo to this handler\n if payload[\"request\"] == \"echo\":\n LOGGER.error(\"This handler can't echo things.\")\n raise tornado.web.HTTPError(status_code=400)\n\n # get the request ID\n reqid = payload.get(\"reqid\")\n if reqid is None:\n raise ValueError(\n \"No request ID provided. \" \"Ignoring this request.\"\n )\n\n # rate limit the request if this is turned on\n if self.ratelimits:\n # get the frontend client IP addr\n frontend_client_ipaddr = payload.get(\"client_ipaddr\")\n\n if not frontend_client_ipaddr:\n LOGGER.error(\n \"[%s] request: '%s' is missing a payload \"\n \"value: 'client_ipaddr' \"\n \"needed to calculate rate, dropping this request.\"\n % (reqid, payload[\"request\"])\n )\n raise tornado.web.HTTPError(status_code=400)\n\n self.ratelimit_request(\n reqid,\n payload[\"request\"],\n frontend_client_ipaddr,\n request_body=payload[\"body\"],\n )\n\n # if we successfully got past host, decryption, rate-limit validation,\n # then process the request\n try:\n\n #\n # dispatch the action handler function\n #\n\n # inject the request ID into the body of the request so the backend\n # function can report on it\n payload[\"body\"][\"reqid\"] = reqid\n\n # inject the PII salt into the body of the request as well\n payload[\"body\"][\"pii_salt\"] = self.pii_salt\n\n #\n # validate the request and choose the function to dispatch\n #\n handler_func, problems, validate_msgs = validate_and_get_function(\n payload[\"request\"], payload[\"body\"]\n )\n\n if handler_func is None:\n problems[\"failure_reason\"] = \"invalid request parameters\"\n response = {\n \"success\": False,\n \"response\": problems,\n \"messages\": [validate_msgs],\n }\n else:\n # inject the config object into the backend function call\n # this passes along any secrets or settings from environ\n # directly to those functions\n backend_func = partial(\n handler_func, payload[\"body\"], config=self.config\n )\n # run the function associated with the request type\n loop = tornado.ioloop.IOLoop.current()\n response = await loop.run_in_executor(\n self.executor,\n backend_func,\n )\n\n #\n # see if the request was one that requires an email and password. in\n # this case, we'll apply backoff to slow down repeated failed\n # passwords\n #\n passcheck_requests = {\"user-login\", \"user-passcheck-nosession\"}\n\n if (\n payload[\"request\"] in passcheck_requests\n and response[\"success\"] is False\n ):\n\n (\n failure_status,\n failure_count,\n failure_wait,\n ) = await self.handle_failed_logins(payload)\n\n # if the user is locked for repeated login failures, handle that\n if failure_status == \"locked\":\n response = await self.lockuser_repeated_login_failures(\n payload, unlock_after_seconds=self.config.userlocktime\n )\n elif failure_status == \"wait\":\n LOGGER.warning(\n \"[%s] User with email: %s is being rate-limited \"\n \"after %s failed login attempts. 
\"\n \"Current wait time: %.1f seconds.\"\n % (\n reqid,\n pii_hash(payload[\"body\"][\"email\"], self.pii_salt),\n failure_count,\n failure_wait,\n )\n )\n\n # reset the failed counter to zero for each successful attempt\n elif (\n payload[\"request\"] in passcheck_requests\n and response[\"success\"] is True\n ):\n\n self.failed_passchecks.pop(payload[\"body\"][\"email\"], None)\n\n #\n # trim the failed_passchecks dict\n #\n if len(self.failed_passchecks) > 1000:\n self.failed_passchecks.pop(self.failed_passchecks.keys()[0])\n\n #\n # form and send the response\n #\n await self.send_response(response, reqid)\n\n except Exception:\n\n LOGGER.exception(\"Failed to understand request.\")\n raise tornado.web.HTTPError(status_code=400)",
"def process(self, request):\n pass",
"def handle_request(self, request, **resources):\r\n if not request.method in self._meta.callmap.keys():\r\n raise HttpError(\r\n 'Unknown or unsupported method \\'%s\\'' % request.method,\r\n status=status.HTTP_501_NOT_IMPLEMENTED)\r\n\r\n # Get the appropriate create/read/update/delete function\r\n view = getattr(self, self._meta.callmap[request.method])\r\n\r\n # Get function data\r\n return view(request, **resources)",
"def handleRequest(self, s, request):\n client = self.connections[s]\n\n try:\n logging.info(\"HANDLING message from %s: %r\", client, repr(request))\n\n try:\n req = json.loads(request)\n req = dic_b64_and_pickle_loads(req)\n except:\n return\n\n if not isinstance(req, dict):\n return\n\n if 'type' not in req:\n return\n\n if req['type'] == 'ack':\n return # Ignore for now\n\n client.send({'type': 'ack'})\n\n if req['type'] == 'connect':\n self.processConnect(client, req)\n elif req['type'] == 'secure':\n self.processSecure(client, req)\n\n except Exception:\n logging.exception(\"Could not handle request\")",
"def handle_event(event, context):\n print(\"Executing...\")\n router = Router(ROUTE_MAP)\n return router.route_request(event, context)",
"def handle_request_from(self, user, request):\n request_type = request.request_type\n\n if request_type in self._plain_requests:\n ret = Response(\n request_type,\n data=self._plain_requests[request_type]()\n )\n elif request_type in self._user_requests and not user:\n ret = self._no_user_response(request_type)\n elif request_type in self._user_requests:\n ret = Response(\n request_type,\n data=self._user_requests[request_type](user)\n )\n else:\n ret = self._complex_requests[request_type](user, request.data)\n\n if ret.success:\n self._operation_count = \\\n (self._operation_count + 1) % self._save_frequency\n if self._operation_count == 0:\n self._users.commit()\n\n return ret",
"async def handle_live(request: web.Request) -> web.Response:\n # pylint: disable=unused-argument\n return web.Response(text=\"OK\")",
"def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()",
"def handle_request(self, t):\n\n # respect ur elders\n super(StoreServer,self).handle_request(t)\n\n # convert out tuple to a request\n request = StoreRequest(t)\n\n # come up w/ a response for them\n response = StoreResponse(key=request.key,\n url=self.get_url(),\n port=self.get_port())\n\n # now spin up a handler for that port\n handler = StoreHandler(self,(response.url,response.port))\n\n # send back our response\n self.bb_client.put(response)",
"def handle(self):\n try:\n # First, send a response to allow the server to continue.\n rsp = \"220 dictserver <xnooptions> <msgid@msgid>\\n\"\n self.request.sendall(rsp.encode(\"utf-8\"))\n\n # Receive the request.\n data = self.request.recv(1024).strip()\n log.debug(\"[DICT] Incoming data: %r\", data)\n\n if VERIFIED_REQ in data:\n log.debug(\"[DICT] Received verification request from test \"\n \"framework\")\n response_data = VERIFIED_RSP.format(pid=os.getpid())\n else:\n log.debug(\"[DICT] Received normal request\")\n response_data = \"No matches\"\n\n # Send back a failure to find.\n response = \"552 {0}\\n\".format(response_data)\n log.debug(\"[DICT] Responding with %r\", response)\n self.request.sendall(response.encode(\"utf-8\"))\n\n except IOError:\n log.exception(\"[DICT] IOError hit during request\")",
"def event_handler(self, response):\n pass",
"def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client",
"def process_request(self, request):\n return None",
"def process_request(self, request):\n request_type, data = request.request_type, request.data\n if request_type is Request.Type.LOG_IN:\n return self._log_in(data)\n elif request_type is Request.Type.LOG_OUT:\n return self._log_out()\n else:\n return self._parent.handle_request_from(self._user, request)",
"def requestReceived(self, command, path, version):\n if command == 'POST':\n self.requestReceivedPOST(path, version)\n else:\n server.Request.requestReceived(self, command, path, version)",
"def _request(self, *args):\n raise NotImplementedError",
"async def _response_handler(self):",
"def request(self, flow: mitmproxy.http.HTTPFlow):",
"def request(self, flow: mitmproxy.http.HTTPFlow):",
"def _handle( self, state, msg ):\n\t\tstate.requests[ msg.id ] = msg\n\t\tstatistics.requests.new()\n\t\tCORE.info( 'Incoming request of type %s' % msg.command )\n\t\tif not state.authenticated and msg.command != 'AUTH':\n\t\t\tres = Response( msg )\n\t\t\tres.status = BAD_REQUEST_UNAUTH\n\t\t\tself._response( res, state )\n\t\telif msg.command == 'AUTH':\n\t\t\tstate.authResponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tstate.authenticate( msg.body[ 'username' ], msg.body[ 'password' ] )\n\t\t\texcept ( TypeError, KeyError ), e:\n\t\t\t\tstate.authResponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\tstate.authResponse.message = 'insufficient authentification information'\n\t\telif msg.command == 'GET' and ( 'ucr' in msg.arguments or 'info' in msg.arguments ):\n\t\t\tresponse = Response( msg )\n\t\t\tresponse.result = {}\n\t\t\tresponse.status = SUCCESS\n\t\t\tif 'ucr' in msg.arguments:\n\t\t\t\tif not isinstance(msg.options, (list, tuple)):\n\t\t\t\t\traise InvalidOptionsError\n\t\t\t\tfor value in msg.options:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif not value:\n\t\t\t\t\t\t\t# make sure that 'value' is non-empty\n\t\t\t\t\t\t\tCORE.warn('Empty UCR variable requested. Ignoring value...')\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif value.endswith('*'):\n\t\t\t\t\t\t\tvalue = value[ : -1 ]\n\t\t\t\t\t\t\tfor var in filter( lambda x: x.startswith( value ), ucr.keys() ):\n\t\t\t\t\t\t\t\tresponse.result[ var ] = ucr.get( var )\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresponse.result[ value ] = ucr.get( value )\n\t\t\t\t\texcept ( TypeError, IndexError, AttributeError ), e:\n\t\t\t\t\t\tCORE.warn('Invalid UCR variable requested: %s' % (value,))\n\t\t\t\t\t\tresponse.status = BAD_REQUEST_INVALID_OPTS\n\t\t\t\t\t\tresponse.message = _('Invalid UCR variable requested: %s') % (value,)\n\n\t\t\telif 'info' in msg.arguments:\n\t\t\t\ttry:\n\t\t\t\t\tfd = gzip.open( '/usr/share/doc/univention-management-console-server/changelog.Debian.gz' )\n\t\t\t\t\tline = fd.readline()\n\t\t\t\t\tfd.close()\n\t\t\t\t\tmatch = MagicBucket.CHANGELOG_VERSION.match( line )\n\t\t\t\t\tif not match:\n\t\t\t\t\t\traise IOError\n\t\t\t\t\tresponse.result[ 'umc_version' ] = match.groups()[ 0 ]\n\t\t\t\t\tresponse.result[ 'ucs_version' ] = '{0}-{1} errata{2} ({3})'.format( ucr.get( 'version/version', '' ), ucr.get( 'version/patchlevel', '' ), ucr.get( 'version/erratalevel', '0' ), ucr.get( 'version/releasename', '' ) )\n\t\t\t\t\tresponse.result[ 'server' ] = '{0}.{1}'.format( ucr.get( 'hostname', '' ), ucr.get( 'domainname', '' ) )\n\t\t\t\t\tresponse.result[ 'ssl_validity_host' ] = int( ucr.get( 'ssl/validity/host', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\t\tresponse.result[ 'ssl_validity_root' ] = int( ucr.get( 'ssl/validity/root', '0' ) ) * 24 * 60 * 60 * 1000\n\t\t\t\texcept IOError:\n\t\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\t\t\tpass\n\n\t\t\tself._response( response, state )\n\t\telif msg.command == 'STATISTICS':\n\t\t\tresponse = Response( msg )\n\t\t\ttry:\n\t\t\t\tpwent = pwd.getpwnam( state.username )\n\t\t\t\tif not pwent.pw_uid in ( 0, ):\n\t\t\t\t\traise KeyError\n\t\t\t\tCORE.info( 'Sending statistic data to client' )\n\t\t\t\tresponse.status = SUCCESS\n\t\t\t\tresponse.result = statistics.json()\n\t\t\texcept KeyError:\n\t\t\t\tCORE.info( 'User not allowed to retrieve statistics' )\n\t\t\t\tresponse.status = BAD_REQUEST_FORBIDDEN\n\t\t\tself._response( response, state )\n\t\telse:\n\t\t\t# inform processor\n\t\t\tif not state.processor:\n\t\t\t\tstate.processor = Processor( *state.credentials() )\n\t\t\t\tcb = 
notifier.Callback( self._response, state )\n\t\t\t\tstate.processor.signal_connect( 'response', cb )\n\t\t\tstate.processor.request( msg )",
"def process_request(self, event, context):\n # if its a new session, run the new session code\n try:\n response = None\n if event['session']['new']:\n self.on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n # regardless of whether its new, handle the request type\n if event['request']['type'] == \"LaunchRequest\":\n response = self.on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n response = self.on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n response = self.on_session_ended(event['request'], event['session'])\n\n except Exception as exc:\n response = self.on_processing_error(event, context, exc)\n\n return response",
"def handle(self, content):\n # Check the API request\n serializer = ApiRequestSerializer(data=content)\n if not serializer.is_valid():\n return self.consumer.send_to_client(\n {\"topic\": \"api\", \"type\": \"error\", \"message\": \"invalid-request\"}\n )\n\n # Make request\n method = serializer.validated_data[\"method\"]\n url = serializer.validated_data[\"url\"]\n payload = serializer.validated_data.get(\"payload\", None)\n logger.info(\"API {}:{}:{}\".format(method, url, payload))\n\n response = getattr(self.client, method)(url, data=payload, follow=True)\n\n # Return to client\n # API response\n to_client = {\n \"topic\": \"api\",\n \"type\": \"response\",\n \"status_code\": response.status_code,\n }\n if response.get(\"Content-Type\") == \"application/json\":\n to_client[\"content\"] = response.json()\n else:\n to_client[\"content\"] = content\n\n # Original request params\n to_client.update({\"method\": method, \"url\": url})\n if payload is not None:\n to_client[\"payload\"] = payload\n\n self.consumer.send_to_client(to_client)",
"def handle(req):\n\n gpio.output(26, gpio.HIGH)\n time.sleep(0.2)\n gpio.output(26, gpio.LOW)\n\n return req",
"def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()",
"def _handle_request(self, method, url, handler):\n if not(method in self.handlers):\n handler.set_status(405) # Method Not Allowed\n handler.write({})\n return\n for (path, fn) in self.handlers[method].items():\n if re.match(path, url):\n fn(url, handler)\n return\n handler.set_status(404) # Not Found\n handler.write({})",
"def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)",
"def handleRequest(self, request):\n handler = getattr(self, 'openid_' + request.mode, None)\n if handler is not None:\n return handler(request)\n else:\n raise NotImplementedError(\n \"%s has no handler for a request of mode %r.\" %\n (self, request.mode))",
"def render(self, request):\n content = request.content.read()\n msg = json.loads(content)\n d = self.engine_bus.handleRequest(self.access_id, msg)\n d.addCallback(self._success, request)\n d.addErrback(self._fail, request)\n return server.NOT_DONE_YET",
"def send(self, request : str):\n pass",
"def __call__(self, *args, **kwargs):\n\t\treturn self.handler()(self.request(kwargs))",
"def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)",
"def handle_connection(conn):\n\n\ttry:\n\t\treq = http_parse_req(http_read(conn))\n\t\thandlers[req.method](conn, req)\n\texcept:\n\t\ttry:\n\t\t# Ignore nested exceptions, as we dont care if the 400\n\t\t# reaches the client or not\n\t\t\thttp_400(conn, b\"Invalid request\\n\")\n\t\texcept:\n\t\t\tpass",
"def handle(self):\n self.request.recv(1024)\n self.request.sendall(pickle.dumps(self.server.lymphocytes_getter()))",
"def handle(self): \n \n data = self.request[0].strip()\n self.socket = self.request[1]\n\n #split off first word of file, assume is filename\n filename,sep,data = data.partition(\" \")\n\n #assume is requesting file\n if not data:\n self.sendfile(filename)\n #assume we have to save the file since data was sent\n else:\n self.savefile(filename,data)\n\n return True",
"def serve(self, request, *args, **kwargs):\n raise Http404",
"def serve(self, request, *args, **kwargs):\n raise Http404",
"def serve(self, request, *args, **kwargs):\n raise Http404",
"def serve(self, request, *args, **kwargs):\n raise Http404",
"def serve(self, request, *args, **kwargs):\n raise Http404",
"def run(self):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n # allow anyone else to handle the request at this point\n handled = tools.run_callback(\"handle\", \n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n if not handled == 1:\n blosxom_handler(self._request)\n\n # do end callback\n tools.run_callback(\"end\", {'request': self._request})",
"def handle(self, data):\n pass",
"def execute_request(self, given_request: Request):\n last_handler = None\n if given_request.output != \"print\":\n last_handler = WriteToFileHandler()\n if given_request.encryption_state == CryptoMode.EN:\n self.start_handler = EncryptDataHandler()\n elif given_request.encryption_state == CryptoMode.DE:\n self.start_handler = DecryptDataHandler()\n else:\n print(\"Wrong Crypto Mode\")\n return False\n self.start_handler.next_handler = last_handler\n if given_request.input_file:\n buffer_handler = self.start_handler\n self.start_handler = ReadDataFromFileHandler()\n self.start_handler.next_handler = buffer_handler\n return self.start_handler.handle_request(given_request)",
"def handle_request(self):\n\n callback_obj = getattr(self, \"get_%s\" % (self.tab_type))\n return callback_obj()",
"def handle_request_get(self, msg):\n\n\t\tfor arg in msg.arguments:\n\t\t\tmethod = {\n\t\t\t\t'ucr': self.handle_request_get_ucr,\n\t\t\t\t'meta': self.handle_request_get_meta,\n\t\t\t\t'info': self.handle_request_get_info,\n\t\t\t\t'modules/list': self.handle_request_get_modules,\n\t\t\t\t'modules': self.handle_request_get_modules,\n\t\t\t\t'categories/list': self.handle_request_get_categories,\n\t\t\t\t'categories': self.handle_request_get_categories,\n\t\t\t\t'user/preferences': self.handle_request_get_user_preferences,\n\t\t\t\t'hosts/list': self.handle_request_get_hosts,\n\t\t\t\t'hosts': self.handle_request_get_hosts,\n\t\t\t}.get(arg)\n\t\t\tif method:\n\t\t\t\tself.finished(msg.id, method(msg))\n\t\t\t\treturn\n\t\traise NotFound()",
"def handler(data, context):\n processed_input = _process_input(data, context)\n response = requests.post(context.rest_uri, data=processed_input)\n return _process_output(response, context)",
"def request() -> None:\n\t_flag.set()",
"def HandlePingRequest(self, request, response):\n self._publish_helper.HandlePingRequest(request, response)",
"def __call__(self, request):\n response = self.get_request(request)\n return response",
"def request(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def route( request, c ):",
"def process_request(self, request, client_address):\n\t\tself.finish_request(request, client_address)\n\t\tself.close_request(request)",
"def dispatch(intent_request):\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'VCH_Policies':\n return respond(intent_request, 'Policies')\n\n raise Exception('Intent with name ' + intent_name + ' not supported')",
"def handle_key_request(self, event):\n if event['sender'] != self.user_id:\n logger.info(\"Ignoring m.room_key_request event from %s.\", event['sender'])\n return\n\n content = event['content']\n device_id = content['requesting_device_id']\n if device_id == self.device_id:\n return\n try:\n self.olm_device.device_keys[self.user_id][device_id]\n except KeyError:\n logger.info(\"Ignoring m.room_key_request event from device %s, which \"\n \"we don't own.\", device_id)\n return\n\n # Build a queue of key requests as we don't want to tell client of each requests,\n # knowing that the canceling event might be coming right up next.\n request_id = content['request_id']\n if content['action'] == 'request':\n body = content['body']\n if body['algorithm'] != self.olm_device._megolm_algorithm:\n return\n if request_id not in self.queued_key_requests[device_id]:\n self.queued_key_requests[device_id][request_id] = body\n elif content['action'] == 'cancel_request':\n # This doesn't remove request_id from the dict, so we will never\n # add an event with this request ID again.\n self.queued_key_requests[device_id][request_id].clear()",
"def _raw_device_action(self, request):\n url = \"/appservices/v6/orgs/{0}/device_actions\".format(self.credentials.org_key)\n resp = self.post_object(url, body=request)\n if resp.status_code == 200:\n return resp.json()\n elif resp.status_code == 204:\n return None\n else:\n raise ServerError(error_code=resp.status_code, message=\"Device action error: {0}\".format(resp.content))",
"def do_GET(self):\r\n self._send_handler_response('GET')",
"def _rest_call(self, data, action):\n path = '/wm/device/?ipv4=' + data\n conn = httplib.HTTPConnection(self.host, self.port)\n conn.request('GET', path)\n response = conn.getresponse()\n ret = (response.status, response.reason, response.read())\n conn.close()\n return ret",
"def get(self):\n self.post()",
"def request(self, url, *args, **kwargs):\n raise NotImplementedError",
"def handle(self):\n for request in self._each_msg():\n r_len, r_type = struct.unpack_from('> I B', request)\n\n if r_type == self.SSH2_AGENTC_REQUEST_IDENTITIES:\n response = self._merge_identities(request)\n elif r_type == self.SSH2_AGENTC_SIGN_REQUEST:\n # Extract key blob from request\n key_blob_len = struct.unpack_from('> I', request, 5)[0]\n key_blob = request[9:9 + key_blob_len]\n hex_blob = ''.join('{:02x}'.format(b) for b in key_blob)\n\n agent = self._identity_map[hex_blob]\n\n if agent:\n if agent == self.server.alternate_agent:\n key_digest = self._key_digest(key_blob)\n LOG.info(\"identity %s used by %s: %s\", key_digest,\n self.username, self.process_info)\n\n response = agent.forward_request(request)\n else:\n response = \\\n self.server.default_agent.forward_request(request)\n else:\n response = self.server.default_agent.forward_request(request)\n\n self.request.sendall(response)",
"def process_request(self, request, client_address):\n self.finish_request(request, client_address)"
] |
[
"0.73241466",
"0.7304394",
"0.69043636",
"0.68481004",
"0.6837162",
"0.67897916",
"0.6763938",
"0.6763938",
"0.6755062",
"0.67278916",
"0.6716694",
"0.66799444",
"0.66772705",
"0.66192824",
"0.66020906",
"0.6588511",
"0.65734357",
"0.65640265",
"0.652787",
"0.65262365",
"0.6518451",
"0.6475598",
"0.64728314",
"0.647037",
"0.6405999",
"0.64050627",
"0.6397834",
"0.6354975",
"0.6331003",
"0.63268197",
"0.6308597",
"0.63032573",
"0.6296701",
"0.62722576",
"0.6262911",
"0.6261627",
"0.62321067",
"0.6217797",
"0.6212858",
"0.6210669",
"0.6193466",
"0.614902",
"0.6095024",
"0.60905695",
"0.6060283",
"0.6050134",
"0.60485023",
"0.604039",
"0.6031225",
"0.6029708",
"0.6028483",
"0.601989",
"0.6003542",
"0.59923685",
"0.5985915",
"0.5974869",
"0.5949223",
"0.59394413",
"0.5933718",
"0.5933718",
"0.5929686",
"0.592096",
"0.590979",
"0.5909236",
"0.5904734",
"0.5899721",
"0.58946395",
"0.58885294",
"0.58811814",
"0.58721906",
"0.5864026",
"0.58631706",
"0.5855381",
"0.58549815",
"0.585314",
"0.58447564",
"0.58447564",
"0.58447564",
"0.58447564",
"0.58447564",
"0.58276886",
"0.5821811",
"0.58070284",
"0.57859594",
"0.57829857",
"0.57828844",
"0.5771387",
"0.5760569",
"0.5751435",
"0.5724624",
"0.5723991",
"0.57203525",
"0.5697472",
"0.56960845",
"0.5695218",
"0.5694328",
"0.56854844",
"0.5682249",
"0.5675728",
"0.5659761",
"0.5656168"
] |
0.0
|
-1
|
Prepare session; attempt database connection, verify provided credentials, and validate.
|
def prepare_session(self, writer, session):
# Get information about the CLIENT
session['client']['ip'], port = writer.get_extra_info('peername')
# Try Database connection
try:
self.connect_database(session)
except DbException as err:
LOG.debug('Could not prepare session: {!s}'.format(err))
return RPCS.Listening
# Get MAC Address (DNS field) from the certificate
peercert = writer.get_extra_info('peercert')
dns = get_element('subjectAltName', peercert)
try:
            session['client']['did'] = dns[0][1] # currently not used, but it should be in the certificate
session['client']['mac'] = dns[1][1]
except IndexError:
# Disconnect when credentials cannot be obtained
LOG.info("Failed to obtain certificate credentials from ip " + session['client']['ip'])
return RPCS.Listening
return RPCS.ExpectInform
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"async def prepare(self):\n\n # Read the secure cookie which exists if we are in an authenticated\n # context (though not if the caimira webservice is running standalone).\n session = json.loads(self.get_secure_cookie('session') or 'null')\n\n if session:\n self.current_user = AuthenticatedUser(\n username=session['username'],\n email=session['email'],\n fullname=session['fullname'],\n )\n else:\n self.current_user = AnonymousUser()",
"def prepare_database(config):\n global Session\n engine = sqlalchemy.create_engine(config.db_string)\n session_factory = sqlalchemy.orm.sessionmaker(bind=engine)\n Session = sqlalchemy.orm.scoped_session(session_factory)",
"def __init__(self, *, username: str = None, password: str = None) -> None:\n LOG.debug(f\"Authenticating to PostgreSQL database using {pg_environment()}\")\n\n connect_params = {\n \"cursor_factory\": NamedTupleCursor,\n \"fallback_application_name\": fallback_application_name(),\n\n **({\"user\": username} if username is not None else {}),\n **({\"password\": password} if password is not None else {}),\n }\n\n try:\n # connect() requires a DSN as the first arg even if the connection\n # details are fully-specified by the environment, but we don't need to\n # fill it with anything.\n self.connection = psycopg2.connect(\"\", **connect_params)\n except DatabaseError as error:\n LOG.error(f\"Authentication failed: {error}\")\n raise error from None\n\n LOG.info(f\"Connected to {self.session_info()}\")",
"async def prepare(self):\n session_id = self.get_secure_cookie('suid')\n if session_id:\n try:\n session_service = SessionsService(db=self.db)\n session_service_get_res = await session_service.get(\n session_id=session_id.decode('utf-8')\n )\n if session_service_get_res['status_code'] == 200:\n self.session = session_service_get_res['data']\n\n except tornado.gen.TimeoutError as err:\n logger.critical(err, exc_info=True)\n loader = tornado.template.Loader(self.get_template_path())\n data = loader.load('timeout.html').generate(\n static_url=self.static_url)\n\n self.set_status(503)\n self.write(data)\n await self.finish()\n else:\n self.session = None",
"def setup_session():\n print(\"Setting up session\")\n engine = setup_engine()\n Base.metadata.bin = engine\n\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n\n return session",
"def _setup_connection(self):\r\n if self.connection.password:\r\n if not self.format_inline('AUTH', self.connection.password):\r\n raise AuthenticationError(\"Invalid Password\")\r\n self.format_inline('SELECT', self.connection.db)",
"def _reset_session(self):\n retries = self.__no_of_retries\n\n while retries > 0:\n if not self._is_session_valid():\n self._close()\n self._set_session()\n else:\n break\n retries -= 1\n else:\n raise DatabaseError.ConnectionError(\"Connection to database not available!\")",
"def init_session(self):\n pass",
"def init_session(self):\n pass",
"def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()",
"async def initialize(\n self,\n url: str,\n password: str | None,\n *,\n isolation_level: Optional[str] = None,\n ) -> None:\n if self._override_engine:\n self._session = await create_async_session(self._override_engine)\n else:\n self._engine = create_database_engine(\n url, password, isolation_level=isolation_level\n )\n self._session = await create_async_session(self._engine)",
"def _create_session(self):\n self.session = requests.Session() # pragma: no cover\n self.session.headers[\"Accept\"] = \"application/json\" # pragma: no cover\n if self.user: # pragma: no cover\n self.session.auth = (self.user, self.cred) # pragma: no cover",
"def setup(self):\n self.load_connection_info(self.ini_filename)\n if self.conn_info:\n self.logger.info('Load connection info of Postgres')\n\n psql_connection_info = f\"dbname={self.conn_info['dbname']} \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\" \n \n check_db = self.create_db(psql_connection_info)\n\n connection = psycopg2.connect((\n f\"dbname=password_manager \" \\\n f\"user={self.conn_info['user']} \" \\\n f\"password={self.conn_info['password']} \" \\\n f\"port={self.conn_info['port']}\")) \n cursor = connection.cursor()\n\n if check_db:\n self.logger.info('Database has been created')\n\n check_tables = self.create_tables(connection, \n cursor, \n self.sql_query_table_person, \n self.sql_query_table_login_data)\n \n if check_tables:\n self.logger.info('Tables have been created')\n else:\n self.logger.info('Tables do not exist')\n else:\n self.logger.info('Database does not exist')\n \n connection.close()\n cursor.close()\n else:\n self.logger.info('Connection to Postgres could not esablished')",
"def __init__(self):\n engine = create_engine(\"postgresql://postgres:1@localhost:5432/postgres\")\n session_class = sessionmaker(bind=engine)\n self.session = session_class()",
"def _connect(self):\n\n assert not self.__is_connected\n\n self._db_engine = create_engine(self._database.url)\n\n from sqlalchemy import event\n\n if self._database.driver == 'sqlite':\n @event.listens_for(self._db_engine, \"connect\")\n def do_connect(dbapi_connection, *args, **kwargs):\n # disable pysqlite's emitting of the BEGIN statement entirely.\n # also stops it from emitting COMMIT before any DDL.\n dbapi_connection.isolation_level = None\n\n @event.listens_for(self._db_engine, \"begin\")\n def do_begin(conn):\n # emit our own BEGIN\n conn.execute(\"BEGIN\")\n\n self._db_connection = self._db_engine.connect()\n self._db_transaction = self._db_connection.begin()\n\n self._orm_session = Session(\n bind=self._db_connection,\n transaction=self,\n )\n self._orm_session_proxy = weakref.proxy(self._orm_session)\n\n self.__is_connected = True",
"def set_connection(cls, user_name, password, end_point, session_verify):\n if not session_verify:\n requests.packages.urllib3.disable_warnings()\n\n cls.user_name = user_name\n cls.password = password\n cls.end_point = end_point\n\n cls.session = requests.Session()\n cls.session.auth = HTTPBasicAuth(user_name, password)\n cls.session.verify = session_verify",
"def initialize_db(password):\n print('Starting...')\n global conn\n\n try:\n conn = psycopg2.connect(database=\"postgres\",#default databse in postgreSQL\n user=\"postgres\",#username of postgreSQL\n password=password,#password of that user in postgreSQL\n host=\"127.0.0.1\",\n port=\"5432\")\n except psycopg2.OperationalError as exception:\n messagebox.showerror(\"password\",\"PASSWORD IS INCORRECT. TRY AGAIN...\")\n sys.exit()",
"def get_or_create_session(db):",
"def before_request():\r\n try:\r\n g.conn = engine.connect()\r\n except:\r\n print(\"uh oh, problem connecting to database\")\r\n import traceback; traceback.print_exc()\r\n g.conn = None",
"def before_request():\r\n try:\r\n g.conn = engine.connect()\r\n except:\r\n print(\"uh oh, problem connecting to database\")\r\n import traceback; traceback.print_exc()\r\n g.conn = None",
"def before_request():\r\n try:\r\n g.conn = engine.connect()\r\n except:\r\n print(\"uh oh, problem connecting to database\")\r\n import traceback; traceback.print_exc()\r\n g.conn = None",
"def maybe_start_new_session_after_checking_email_and_password(cls,\n pgconn, email_address, password):\n\n cursor = pgconn.cursor()\n\n cursor.execute(textwrap.dedent(\"\"\"\n insert into webapp_sessions\n (person_uuid)\n select person_uuid\n from people\n where email_address = %(email_address)s\n and salted_hashed_password = crypt(\n %(password)s,\n salted_hashed_password)\n and person_status = 'confirmed'\n returning (webapp_sessions.*)::webapp_sessions as gs\n \"\"\"), {\n \"email_address\": email_address,\n \"password\": password})\n\n if cursor.rowcount:\n return cursor.fetchone().gs",
"def __init__(self, session):\n self.session = session\n self.dbi = DBInterface(self.session)",
"def init_session(connection_string=None, drop=False):\n if connection_string is None:\n engine = create_engine('sqlite://',\n echo=False,\n connect_args={'check_same_thread':False},\n poolclass=StaticPool)\n else:\n engine = create_engine(connection_string)\n\n from database.model import Base\n\n global session\n\n if drop:\n try:\n old_session = session\n Base.metadata.drop_all(bind=old_session.bind)\n except:\n pass\n\n db_session = scoped_session(sessionmaker(autocommit=False,\n autoflush=False,\n bind=engine))\n Base.metadata.create_all(bind=engine)\n\n session = db_session",
"def attempt_login(database_address: str, username: str, password: str):\n dj.config['database.host'] = database_address\n dj.config['database.user'] = username\n dj.config['database.password'] = password\n\n # Attempt to connect return true if successful, false is failed\n try:\n dj.conn(reset=True)\n return dict(result=True)\n except Exception as e:\n return dict(result=False, error=e)",
"def initialize_if_needed(self):\n if self.session is None:\n raise RunnerTerminalSessionClosed()\n self.session.initialize_if_needed()",
"def __init__(self):\n engine = db_connect()\n self.Session = sessionmaker(bind=engine)",
"def __init__(self):\n # create a connection through our super role via db.connect\n try:\n self.connection = db.connect(SUPER_ROLE, authcode=SUPER_AUTHCODE, host=HOST)\n except db.OperationalError: # thrown if password or role don't match\n print 'Caught an exception while trying to log in, maybe your account does not exist yet?'\n exit()\n \n # get a DictCursor as our cursor (which returns queries as column-name dicts)\n self.cursor = self.connection.cursor(DictCursor)\n \n self.setup_tables()",
"def _connect(self):\n try:\n self.conn = psycopg2.connect(\n host=self.host,\n user=self.username,\n password=self.password,\n port=self.port,\n dbname=self.dbname\n )\n except psycopg2.DatabaseError as e:\n logger.error(e)\n raise e\n logger.info('Connection opened successfully.')",
"def _login(self, username=None, password=None, store_password=False,\n reenter_password=False):\n if username is None:\n if self.USERNAME == \"\":\n raise LoginError(\"If you do not pass a username to login(), \"\n \"you should configure a default one!\")\n else:\n username = self.USERNAME\n\n # login after logging out (interactive)\n if not hasattr(self, 'session'):\n self.session = requests.session()\n\n # login after login (interactive)\n if hasattr(self, 'username'):\n log.warning(\"Attempting to login while another user ({0}) \"\n \"is already logged in.\".format(self.username))\n self.check_login_status()\n return\n\n self.username = username\n\n # Get password from keyring or prompt\n self.password, password_from_keyring = self._get_password(\n \"astroquery:www.cosmosim.org\", username, reenter=reenter_password)\n\n # Authenticate\n warnings.warn(\"Authenticating {0} on www.cosmosim.org...\"\n .format(self.username))\n authenticated = self._request('POST', CosmoSim.QUERY_URL,\n auth=(self.username, self.password),\n cache=False)\n if authenticated.status_code == 200:\n warnings.warn(\"Authentication successful!\")\n elif (authenticated.status_code == 401\n or authenticated.status_code == 403):\n warnings.warn(\"Authentication failed!\")\n elif authenticated.status_code == 503:\n warnings.warn(\"Service Temporarily Unavailable...\")\n\n # Generating dictionary of existing tables\n self._existing_tables()\n\n if (authenticated.status_code == 200\n and password_from_keyring is None and store_password):\n keyring.set_password(\"astroquery:www.cosmosim.org\",\n self.username, self.password)\n\n # Delete job; prevent them from piling up with phase PENDING\n if authenticated.status_code == 200:\n soup = BeautifulSoup(authenticated.content, \"lxml\")\n self.delete_job(jobid=str(soup.find(\"uws:jobref\")[\"id\"]),\n squash=True)\n\n return authenticated",
"def test_get_db_session(initialized_db_url):\n with utils.get_db_session(initialized_db_url) as db:\n assert isinstance(db, Session)",
"def before_request():\n try:\n g.conn = engine.connect()\n except:\n print (\"uh oh, problem connecting to database\")\n import traceback; traceback.print_exc()\n g.conn = None",
"def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None",
"def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None",
"def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None",
"def before_request():\n try:\n g.conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n g.conn = None",
"def _connect(self, **kwargs):\n global _connection\n if self.reuse and _connection:\n self.connection = _connection\n else:\n if pymongo.version_tuple[0] < 3:\n try:\n self.connection = Connection(host=self.host,\n port=self.port, **kwargs)\n # pymongo >= 3.0 does not raise this error\n except PyMongoError:\n if self.fail_silently:\n return\n else:\n raise\n else:\n self.connection = Connection(host=self.host, port=self.port,\n **kwargs)\n try:\n self.connection.is_locked\n except ServerSelectionTimeoutError:\n if self.fail_silently:\n return\n else:\n raise\n _connection = self.connection\n\n self.db = self.connection[self.database_name]\n if self.username is not None and self.password is not None:\n auth_db = self.connection[self.authentication_database_name]\n self.authenticated = auth_db.authenticate(self.username,\n self.password)\n\n if self.capped:\n #\n # We don't want to override the capped collection\n # (and it throws an error anyway)\n try:\n self.collection = Collection(self.db, self.collection_name,\n capped=True, max=self.capped_max,\n size=self.capped_size)\n except OperationFailure:\n # Capped collection exists, so get it.\n self.collection = self.db[self.collection_name]\n else:\n self.collection = self.db[self.collection_name]",
"def test_set_session():",
"def bootstrap(self):\n\n self.db = connection_manager.get(DbConnection, host=self.ip, port=3306, user=self.user, password=self.password)\n\n self.connected = True",
"def connect(self):\n \n # return if already connected\n if self._connected: \n return\n \n # preconditions\n if self._url is None: \n raise Exception(\"Need a connection url\")\n \n self._engine = sqlalchemy.create_engine(self._url)\n\n self._conn = self._engine.connect()\n \n self._metadata = sqlalchemy.MetaData(bind=self._engine)\n \n self._session_maker = sessionmaker(bind=self._engine)\n \n self._connected = True\n \n self._log.info(\"Connected to the database %s\"%(self._url))",
"def __init__(self, **kwargs):\r\n self._kwargs = kwargs\r\n\r\n if 'uri' in self._kwargs:\r\n self.session = get_session(self._kwargs['uri'], mode='session')\r\n else:\r\n # open a database session\r\n self.session = get_session(uri=None, mode='session', **{k: v for k, v in self._kwargs.items() if k in ('db_name', 'data_path')})",
"def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)",
"def initialize(self, session,\n prepare=lambda: None, finalize=lambda: None):\n self.session = session\n self.prepare = prepare\n self.finalize = finalize\n self._set_session_callbacks()",
"def session_setup(opts: Dict[Any, Any]) -> Any: #TODO\n stype = ''\n if 'serverca' in opts and 'cert' in opts:\n stype = 'ssl'\n s = session.get(stype, **opts)\n if s is None:\n raise errors.KojiError('Unable to idenify authentication type.')\n s.login()\n if not s.is_ok():\n raise errors.AuthError('Unable to validate session')\n return s",
"def connect_to_database():\n engine = create_engine('postgresql://catalog:password@localhost/catalog')\n Base.metadata.bind = engine\n db_session = sessionmaker(bind=engine)\n session = db_session()\n return session",
"def database_session():\n if \"CI\" in os.environ:\n con = psycopg2.connect(\n host=os.environ[\"POSTGRES_HOST\"],\n port=os.environ[\"POSTGRES_PORT\"],\n user=os.environ[\"PGUSER\"],\n password=os.environ[\"PGPASSWORD\"],\n )\n else:\n con = psycopg2.connect(host=\"127.0.0.1\", port=\"5432\")\n # Setup\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cursor = con.cursor()\n cursor.execute(f'create database \"{DB}\";')\n session = Meta.init(CONN_STRING).Session()\n yield session\n\n # Teardown\n engine = session.get_bind()\n session.close()\n engine.dispose()\n Meta.engine = None\n\n cursor.execute(f'drop database \"{DB}\";')\n cursor.close()\n con.close()",
"def _initialize_session(self):\n session = requests.Session()\n session.auth = (self.login, self.password)\n session.verify = False\n session.headers.update({'Accept': 'application/json'})\n session.headers.update({'Content-type': 'application/json'})\n return session",
"def connect_samdb_env(env_url, env_username, env_password, lp=None):\n samdb_url = env_get_var_value(env_url)\n creds = credentials.Credentials()\n if lp is None:\n # guess Credentials parameters here. Otherwise workstation\n # and domain fields are NULL and gencache code segfalts\n lp = param.LoadParm()\n creds.guess(lp)\n creds.set_username(env_get_var_value(env_username))\n creds.set_password(env_get_var_value(env_password))\n return connect_samdb(samdb_url, credentials=creds, lp=lp)",
"def before_request():\n try:\n g.conn = ENGINE.connect()\n except Exception:\n print \"uh oh, problem connecting to database\"\n # traceback.print_exc()\n g.conn = None",
"def before_request():\n\tg.db = sql.connect(host=cfg.dbhost, port=cfg.dbport, user=cfg.user,\\\n\t\tpasswd=cfg.password, db=cfg.database,\\\n\t\tcharset=cfg.charset)",
"def create_session(self):\n self._session = self.create_scoped_session()\n self.session = self._session()",
"def before_request():\n engine = cache['engine']\n try:\n g.conn = engine.connect()\n except:\n print \"error creating temporary connection to the db\"\n import traceback; traceback.print_exc()\n g.conn = None",
"def connect(cls):\n engine = create_engine(cls._build_uri(), echo=False)\n cls.Session = sessionmaker()\n cls.Session.configure(bind=engine)",
"def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)\n\n if self.sqlite_file is not None:\n dbname = 'sqlite:///%s' % self.sqlite_file\n self.sqlite_engine = create_engine(dbname, echo=False)\n self.sqlite_session = scoped_session(sessionmaker(bind=self.sqlite_engine))\n DB_Base.metadata.create_all(self.sqlite_engine)\n logger.info('Using SQLite %s' % self.sqlite_engine)",
"def __connect(self):\n session, metadata, connection = db(dbhost=getattr(self, \"host\"),\n dbuser=getattr(self, \"user\"),\n dbpass=getattr(self, \"password\"),\n dbname=getattr(self, \"dbname\"))\n return session, metadata, connection",
"def login_patient(args):\n\n con, cursor = db.connect_db()\n\n cursor.execute(\"SELECT * FROM Patient WHERE P_SSN = ? AND Password = ?;\",\n (args[\"ssn\"], args[\"password\"]))\n\n # No user in database with those credentials.\n res = cursor.fetchone()\n if not res:\n con.close()\n return False\n\n # User has correct credentials.\n con.close()\n return True",
"def check_auth(username, password):\n session.pop('username', None)\n session.pop('password', None)\n session['username'] = username\n session['password'] = password\n # Test if we can connect to a region\n connect_to_region()\n return True",
"def __init__(self):\n # Start Database Manager #\n self.dbManager = DBPM()\n self.user = None\n self.state = SessionStates.LOGGED_OUT\n\n # Make admin user - this is only needed when database is empty,\n # and admin user has never been created yet \n self.p_makeAdminUser();",
"def create_session(self, username, password, hostname, database, port=3306):\n engine = create_engine(\"mysql+mysqlconnector://%s:%s@%s:%s/%s\" %\n (username, password, hostname, port, database))\n if self.session is not None:\n self.session.close()\n Session = sessionmaker(bind=engine, autoflush=False)\n self.session = Session()",
"def __init__(self):\n with open('config.json') as config:\n data = json.load(config)\n\n password = self.decode_password(data['db']['password'])\n db_conn_string = 'postgresql://' + data['db']['username'] + ':' + password + '@' + \\\n data['db']['hostname'] + ':' + data['db']['port'] + '/' + data['db']['database']\n\n self.engine = create_engine(db_conn_string)\n try:\n conn = self.engine.connect()\n if conn is not None:\n print(\"-I- Successful Database Connection\")\n except Exception as e:\n print(\"-W- \" + str(e))",
"def init_db(self):\n self.db_config = databaseutils.process_db_config(self.config['db'])\n\n from sqlalchemy import create_engine\n from sqlalchemy.orm import sessionmaker, scoped_session\n self.engine = create_engine(self.db_config.constr, pool_recycle=3600)\n self.session = scoped_session(sessionmaker(bind=self.engine))\n\n # Make sure tables are created\n DB_Base.metadata.create_all(self.engine)",
"def login_clerk(args):\n con, cursor = db.connect_db()\n\n cursor.execute(\"SELECT * FROM Clerk WHERE SSN = ? AND Password = ?;\",\n (args[\"ssn\"], args[\"password\"]))\n\n # No user in database with those credentials.\n res = cursor.fetchone()\n if not res:\n con.close()\n return False\n\n # User has correct credentials.\n con.close()\n return True",
"def __init__(self, config):\n\n engine = self.__my_create_engine(config)\n\n if not engine:\n raise Exception(\"No engine created\")\n\n engine.connect()\n #metadata = MetaData(bind=engine)\n Session = sessionmaker(bind=engine)\n\n # Set the objects to work with\n self.session = Session()",
"def setup_session(username, password, check_url=None,\n session=None):\n return get_cookies.setup_session('https://urs.earthdata.nasa.gov',\n username=username,\n password=password,\n session=session,\n check_url=check_url)",
"def login(self):\n\n self.__login_if_required()",
"def _session(self):\n if self.session is None:\n self.session = create_session(self.config, self.auth)\n return self.session",
"def test_session_promotion(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())",
"def _initTestingDB(): \n from sqlalchemy import create_engine\n engine = create_engine('sqlite://')\n from .models import (\n Base,\n TodoUser,\n )\n DBSession.configure(bind=engine)\n Base.metadata.create_all(engine)\n \n return DBSession",
"def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())",
"def get_session():\n if MYSQL['username'] is None:\n raise ValueError(\"User name is mandatory\")\n\n if MYSQL['password'] is None:\n raise ValueError(\"Password is mandatory\")\n\n if MYSQL['host'] is None:\n raise ValueError(\"Host is mandatory\")\n\n if MYSQL['db_name'] is None:\n raise ValueError(\"Database Name is mandatory\")\n\n try:\n engine = create_engine(\n '{engine}://{username}:{password}@{host}/{db_name}'.format(**MYSQL),\n pool_size=MYSQL[\"pool_size\"],\n echo=MYSQL[\"debug\"]\n )\n\n session_factory = sessionmaker(bind=engine)\n sess = scoped_session(session_factory)\n return sess\n\n except Exception as err:\n print(err)\n exit()",
"def init_postgres(testing):\n if 'POSTGRESQL_DATABASE_URI' in app.config:\n if not testing:\n # not testing will use request context as scope\n # for sqlalchemy Session object\n from flask import _app_ctx_stack\n import pgsqlutils.base as pgbase\n from pgsqlutils.base import get_db_conf, init_db_conn\n from sqlalchemy.orm import sessionmaker, scoped_session\n dbconf = get_db_conf()\n dbconf.DATABASE_URI = app.config['POSTGRESQL_DATABASE_URI']\n # monkey patching to replace default session\n # by a sessing handled by flask\n pgbase.Session = scoped_session(\n sessionmaker(),\n scopefunc=_app_ctx_stack.__ident_func__)\n init_db_conn()\n else:\n # Testing will use current thread as scope for Session\n from pgsqlutils.base import get_db_conf, init_db_conn\n dbconf = get_db_conf()\n dbconf.DATABASE_URI = app.config['POSTGRESQL_DATABASE_URI']\n init_db_conn()",
"def session_preparation(self):\n self._test_channel_read()\n self.set_base_prompt()\n self.disable_paging(command=\"session paginate disable\")\n self.set_terminal_width(command='terminal width 511')",
"def __init__(self, db_session):\n self.db_session = db_session",
"def sessionCheck(session, req):\n\tlog(\"sessionCheck called\", session, req)\n\tif config.AUTH_TYPE=='NONE':\n\t\tlog(\"sessionCheck passed\", session, req)\n\t\tpass\n\telif config.AUTH_TYPE=='HTTP':\n\t\tif req.user is None:\n\t\t\tlog(\"sessionCheck failed\", session, req)\n\t\t\traise Exception(\"HTTP authentication misconfiguration (req.user is None)\")\n\t\telse:\n\t\t\tlog(\"sessionCheck passed\", session, req)\n\telif config.AUTH_TYPE=='FORM':\n\t\tif session.is_new() or not session.has_key('username'):\n\t\t\tlog(\"sessionCheck failed\", session, req)\n\t\t\ttry:\n\t\t\t\tutil.redirect(req, 'login.psp?redirect=%s' % urllib.quote_plus(req.unparsed_uri))\n\t\t\texcept apache.SERVER_RETURN: #fix for pre-3.3.1 bug where it uses apache.OK instead of apache.DONE (https://issues.apache.org/jira/browse/MODPYTHON-140)\n\t\t\t\traise apache.SERVER_RETURN, apache.DONE\n\t\telse:\n\t\t\tlog(\"sessionCheck passed\", session, req)\n\telse:\n\t\traise Exception(\"sanity check\")",
"def init_session(self):\n ssl_context = ssl.create_default_context(\n purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None,\n cadata=None)\n ssl_settings = {\"ssl_context\": ssl_context}\n self.session = iRODSSession(\n host=self.module.params[\"host\"],\n port=self.module.params[\"port\"],\n user=self.module.params[\"admin_user\"],\n password=self.module.params[\"admin_password\"],\n zone=self.module.params[\"zone\"],\n **ssl_settings)",
"def __init__(self):\n engine = db_connect()\n create_reals_table(engine)\n self.Session = sessionmaker(bind=engine)",
"def __authenticate(self):\n try:\n self.creds = self.client.login(self.username, self.password, self.environment)\n except Thrift.TException as e:\n raise e",
"def init_db(connection, echo):\r\n\r\n # create the database tables as defined\r\n engine = create_engine(connection, echo=echo)\r\n Base.metadata.create_all(engine)\r\n\r\n # create a session\r\n Base.metadata.bind = engine\r\n BaseSession = sessionmaker(bind=engine)\r\n session = BaseSession()\r\n\r\n # set the shared Model session\r\n Model.use_session(session)\r\n\r\n return (engine, session)",
"def connect():\n # global ENGINE\n # global Session\n\n # ENGINE = create_engine(\"sqlite:///ratings.db\", echo=True)\n # Session = sessionmaker(bind=ENGINE)\n\n # return Session()\n pass",
"def init_db(self):\n\n # The user can provide a custom string\n if self.database is None:\n self.logger.error(\"You must provide a database url, exiting.\")\n sys.exit(1)\n\n self.engine = create_engine(self.database, convert_unicode=True)\n self.session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=self.engine)\n )\n\n # Database Setup\n Base.query = self.session.query_property()\n\n # import all modules here that might define models so that\n # they will be registered properly on the metadata. Otherwise\n # you will have to import them first before calling init_db()\n import expfactory.database.models\n\n self.Base = Base\n self.Base.metadata.create_all(bind=self.engine)",
"def create_connection(self):\n try:\n self.conn = psycopg2.connect(host=self.host, port=self.port, database=self.database, user=self.user, password=self.password)\n\n except:\n print(\"Unable to connect to the database. Please check your options and try again.\")\n exit()",
"def set_db_session():\n g.s = database.db_session()",
"def setup_user(self):\r\n self.email = 'foo@test.com'\r\n self.password = 'bar'\r\n self.username = 'test'\r\n self.create_account(self.username,\r\n self.email, self.password)\r\n self.activate_user(self.email)\r\n self.login(self.email, self.password)",
"def init_pre_connection(self):\n\n if \"mysql\" in PyFunceble.INTERN:\n self.__dict__.update(PyFunceble.INTERN[\"mysql\"].copy())\n\n if self.authorized and not self.pre_initiated:\n for (description, data) in self.variables.items():\n environment_var = PyFunceble.helpers.EnvironmentVariable(data[\"env\"])\n if environment_var.exists():\n setattr(\n self, \"_{0}\".format(description), environment_var.get_value(),\n )\n else:\n message = \"[MySQL/MariaDB] Please give us your DB {0} ({1}): \".format(\n description.capitalize(), repr(data[\"default\"])\n )\n\n if description != \"password\":\n user_input = input(message)\n else:\n user_input = getpass(message)\n\n if user_input:\n setattr(self, \"_{0}\".format(description), user_input)\n self.env_content[data[\"env\"]] = user_input\n else:\n setattr(self, \"_{0}\".format(description), data[\"default\"])\n self.env_content[data[\"env\"]] = data[\"default\"]\n\n # pylint: disable = attribute-defined-outside-init\n self._port = int(self._port)\n self.save_to_env_file(self.env_content, self.pyfunceble_env_location)\n self.pre_initiated = True",
"def initialize(self):\n self.login()",
"def login_doctor(args):\n con, cursor = db.connect_db()\n\n cursor.execute(\"SELECT * FROM Doctor WHERE SSN = ? AND Password = ?;\",\n (args[\"ssn\"], args[\"password\"]))\n\n # No user in database with those credentials.\n res = cursor.fetchone()\n if not res:\n con.close()\n return False\n\n # User has correct credentials.\n con.close()\n return True",
"def __init__(self, database_env='test', conf_creds=None):\n import mysql_conf as conf # This had problems when using Python 3\n import MySQLdb\n db_server, db_user, db_pass, db_schema = \\\n conf.APP_CREDS[conf.Apps.TESTCASE_REPOSITORY][database_env]\n retry_count = 3\n backoff = 1.2 # Time to wait (in seconds) between retries\n count = 0\n while count < retry_count:\n try:\n self.conn = MySQLdb.connect(host=db_server,\n user=db_user,\n passwd=db_pass,\n db=db_schema)\n self.conn.autocommit(True)\n self.cursor = self.conn.cursor()\n return\n except Exception:\n time.sleep(backoff)\n count = count + 1\n if retry_count == 3:\n raise Exception(\"Unable to connect to Database after 3 retries.\")",
"def get_db_session(src_or_dest):\n database = SQL_DBNAME\n conn_string = get_connection_string(src_or_dest)\n success, log, engine = connect_db(conn_string, database)\n session = session_open(engine)\n return session",
"def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)\n logging.info(\"****DuplicatesPipeline: database connected****\")",
"def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()",
"def setUp(self):\n config = SelectLsstImagesTask.ConfigClass()\n try:\n DbAuth.username(config.host, str(config.port)),\n except RuntimeError as e:\n reason = \"Warning: did not find host=%s, port=%s in your db-auth file; or %s \" \\\n \"skipping unit tests\" % \\\n (config.host, str(config.port), e)\n raise unittest.SkipTest(reason)",
"def __init__(self, tables, views, config, schema=None, connection_retries=2):\n # pylint: disable=too-many-arguments\n self.__no_of_retries = connection_retries\n self._set_database_engine(config)\n self._set_session()\n self.exschema = schema\n\n if not self._is_session_valid():\n self._reset_session()\n\n if not self._create(tables, views, schema, config):\n raise DatabaseError.TableCreationError(\"Table creation failed. Check logs!\")",
"def init_database() -> bool:\n global cursor\n global conn\n\n postgres_db = os.environ.get('POSTGRES_DB')\n postgres_user = os.environ.get('POSTGRES_USER')\n postgres_password = os.environ.get('POSTGRES_PASSWORD')\n postgres_host = os.environ.get('POSTGRES_HOST')\n postgres_port = os.environ.get('POSTGRES_PORT')\n\n connect_str = \"dbname='{db}' user='{user}' password='{password}' host='{host}' port={port}\".format(db=postgres_db,\n user=postgres_user,\n password=postgres_password,\n host=postgres_host,\n port=postgres_port)\n\n conn = psycopg2.connect(connect_str)\n conn.autocommit = True\n cursor = conn.cursor()\n\n \"\"\" create table vk_users \"\"\"\n return create_table_if_not_exists('ips', 'CREATE TABLE ips (ip varchar(15) PRIMARY KEY, status int8); CREATE INDEX idx_ips ON ips USING btree(ip);')",
"def login(self):\n self.session = requests.session()\n\n # Lie about the user agent because Lisa Zepto doesn't work without it.\n self.session.headers[\"User-Agent\"] = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0\"\n\n loginPayload = {\"admin_user\" : self.config.Username, \"admin_password\" : self.config.Password}\n loginResult = self.session.post(self.loginURL, data=loginPayload)\n\n if loginResult.status_code != 200:\n raise ConnectionError(\"Unable to login.\")",
"def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.session = sessionmaker(bind=engine)",
"def setup_session(self, transaction_retries=3):\n\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n self.Session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=self.engine))\n\n self.conflict_resolver = ConflictResolver(self.open_session, self.transaction_retries)\n\n for name, coin in self.coins.all():\n coin.wallet_model.backend = coin.backend\n coin.address_model.backend = coin.backend\n coin.transaction_model.backend = coin.backend\n coin.account_model.backend = coin.backend",
"def connect_db(self):\n try:\n self.connection = self.engine.connect()\n except Exception:\n self.print_std_error()",
"def prepare_connection(unpw, port=''):\n params = {\n 'database': unpw['database'],\n 'user': unpw['username'],\n 'password': unpw['password'],\n 'host': 'localhost',\n 'port': port,\n }\n return params",
"def before_request():\n start=time.time();\n try:\n conn = engine.connect()\n except:\n print \"uh oh, problem connecting to database\"\n import traceback; traceback.print_exc()\n conn = None",
"def init_connection(self, db):\n log.info(\"== Stage 1: Init ==\")\n self.use_db(db)\n self.set_no_binlog()\n self.get_mysql_settings()\n self.init_mysql_version()\n self.sanity_checks()\n self.set_tx_isolation()\n self.set_sql_mode()\n self.enable_priority_ddl()\n self.skip_cache_fill_for_myrocks()\n self.enable_sql_wsenv()\n self.override_session_vars()\n self.get_osc_lock()"
] |
[
"0.6290233",
"0.6030922",
"0.594931",
"0.58703387",
"0.5845047",
"0.57664585",
"0.5687237",
"0.56795657",
"0.56795657",
"0.5650503",
"0.56293213",
"0.5617835",
"0.55368704",
"0.5494034",
"0.54863006",
"0.54586",
"0.54352534",
"0.5423377",
"0.5407348",
"0.5407348",
"0.5407348",
"0.5393001",
"0.5379599",
"0.5369715",
"0.5367152",
"0.53569645",
"0.5351309",
"0.53472376",
"0.5344177",
"0.5342822",
"0.5323919",
"0.5321011",
"0.5314367",
"0.5314367",
"0.5314367",
"0.5314367",
"0.5293358",
"0.5284075",
"0.5279441",
"0.5275692",
"0.52745265",
"0.5268967",
"0.52678823",
"0.525782",
"0.52530605",
"0.52511746",
"0.52480024",
"0.52466786",
"0.52448976",
"0.5241792",
"0.52342695",
"0.52257794",
"0.5224957",
"0.5224821",
"0.52234215",
"0.52165383",
"0.521013",
"0.5208473",
"0.5198573",
"0.51953745",
"0.5190762",
"0.5185769",
"0.5184111",
"0.5175187",
"0.5173525",
"0.5155794",
"0.5153997",
"0.51483065",
"0.51332146",
"0.5127381",
"0.51268727",
"0.5122344",
"0.51164013",
"0.51059616",
"0.5103458",
"0.5101615",
"0.50953126",
"0.5085825",
"0.5085805",
"0.5085264",
"0.50820446",
"0.50745803",
"0.50643766",
"0.50626355",
"0.50551754",
"0.50489855",
"0.5043054",
"0.5032476",
"0.5026771",
"0.50242144",
"0.50116616",
"0.5010045",
"0.50090355",
"0.5005817",
"0.5003971",
"0.49996793",
"0.4999318",
"0.49951354",
"0.4992825",
"0.4992727"
] |
0.5441848
|
16
|
Create an SSL context suitable for accepting session requests
|
def __init__(self):
try:
context = ssl.create_default_context(
purpose=ssl.Purpose.CLIENT_AUTH)
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.options |= ssl.OP_NO_TLSv1
context.options |= ssl.OP_NO_TLSv1_1
context.options |= ssl.OP_NO_COMPRESSION
context.verify_mode = ssl.CERT_REQUIRED
# TODO do not use static configuration parameters
context.load_verify_locations(cafile='/sbin/rpcsd/root.cert.pem')
context.load_cert_chain(certfile='/sbin/rpcsd/gaps.pem')
context.set_ciphers('AES128-SHA256')
RPCS.context = context
except FileNotFoundError:
# If we can't set up TLS context, log error and exit
LOG.error("Could not setup TLS context: certificate file(s) "
"not present in the correct directory")
exit(1)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _create_ssl_context(cfg):\n ctx = ssl.SSLContext(cfg.ssl_version)\n ctx.load_cert_chain(cfg.certfile, cfg.keyfile)\n ctx.verify_mode = cfg.cert_reqs\n if cfg.ca_certs:\n ctx.load_verify_locations(cfg.ca_certs)\n if cfg.ciphers:\n ctx.set_ciphers(cfg.ciphers)\n return ctx",
"def _get_ssl_context(self):\n context = ssl.SSLContext(self.TLS_VERSION)\n context.load_cert_chain(self.ssl_cert, self.ssl_key)\n return context",
"def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx",
"async def _create_context(self) -> ssl.SSLContext:\n context = utils.server_context_modern()\n\n await self.cloud.run_executor(\n context.load_cert_chain,\n self._acme.path_fullchain,\n self._acme.path_private_key,\n )\n\n return context",
"def ssl_options_to_context(ssl_options):\n ...",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv23_METHOD)\n ctx.use_certificate_file(Settings.BaseDir+'/server.pem')\n ctx.use_privatekey_file(Settings.BaseDir+'/privkey.pem')\n return ctx",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv3_METHOD)\n ctx.use_certificate_file(config.client.certificate)\n ctx.use_privatekey_file(config.client.private_key)\n return ctx",
"def create_ssl_context(cert, key, **kwargs):\n if hasattr(ssl, 'SSLContext'):\n ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER, **kwargs)\n ctx.load_cert_chain(cert, key)\n return ctx\n\n if isinstance(cert, str):\n with open(cert, 'rb') as f:\n cert = f.read()\n if isinstance(key, str):\n with open(key, 'rb') as f:\n key = f.read()\n\n class FakeSSLSocket:\n def __init__(self, sock, **kwargs):\n self.sock = sock\n self.kwargs = kwargs\n\n def accept(self):\n client, addr = self.sock.accept()\n return (ssl.wrap_socket(client, cert=cert, key=key, **self.kwargs),\n addr)\n\n def close(self):\n self.sock.close()\n\n class FakeSSLContext:\n def __init__(self, **kwargs):\n self.kwargs = kwargs\n\n def wrap_socket(self, sock, **kwargs):\n all_kwargs = self.kwargs.copy()\n all_kwargs.update(kwargs)\n return FakeSSLSocket(sock, **all_kwargs)\n\n return FakeSSLContext(**kwargs)",
"def __get_http2_ssl_context(self):\n # Get the basic context from the standard library.\n if self.client_side == False:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.ctx = ssl._create_unverified_context()\n else:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.server_cert)\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.ctx = ssl._create_unverified_context()\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n self.ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n self.ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n\n\n if self.client_side == False:\n self.ctx.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n else:\n self.ctx.load_cert_chain(certfile=self.client_certs, keyfile=self.client_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n pass\n\n\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n self.ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n self.ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError as e:\n print(\"TLS Error: NotImplementedError=%s\" % (e))\n pass\n\n #self.ctx = ctx\n\n return True",
"def __call__(self):\n ssl_mode, external_ca = ssl_utils.get_ssl_mode()\n\n ctxt = {\n 'ssl_mode': ssl_mode,\n }\n\n if ssl_mode == 'off':\n close_port(config('ssl_port'))\n ssl_utils.reconfigure_client_ssl()\n return ctxt\n\n ssl_key = convert_from_base64(config('ssl_key'))\n ssl_cert = convert_from_base64(config('ssl_cert'))\n ssl_ca = convert_from_base64(config('ssl_ca'))\n ssl_port = config('ssl_port')\n\n # If external managed certs then we need all the fields.\n if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and\n not all((ssl_key, ssl_cert))):\n log('If ssl_key or ssl_cert are specified both are required.',\n level=ERROR)\n sys.exit(1)\n\n if not external_ca:\n ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()\n\n ctxt.update(self.enable_ssl(\n ssl_key, ssl_cert, ssl_port, ssl_ca,\n ssl_only=(ssl_mode == \"only\"), ssl_client=False\n ))\n\n ssl_utils.reconfigure_client_ssl(True)\n open_port(ssl_port)\n\n return ctxt",
"def get_ssl_context() -> ssl.SSLContext | None:\n if (ca_folder := os.getenv('ref_ca')) is None: # noqa: SIM112\n return None\n return mk_ssl_context_from_folder(ca_folder,\n private_key='user_private_key_encrypted.pem',\n certificate='user_certificate_root_signed.pem',\n ca_public_key='root_certificate.pem',\n cyphers_file=None,\n ssl_passwd=os.getenv('ref_ssl_passwd')) # noqa: SIM112",
"def create_tls_context(TLSSTRENGTH):\n\n #CREATE a CONTEXT that we can then update\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)\n\n if TLSSTRENGTH == \"tls1_3\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_3)\n\n if TLSSTRENGTH == \"tls1_2\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)\n\n elif TLSSTRENGTH == \"tls1_1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_1)\n\n elif TLSSTRENGTH == \"tls1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1)\n\n else:\n print(\"Valid TLS Protocol Not Found: Needs to be in OpenSSL format: tls_1, tls_1_1 tls_2\")\n return\n\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n print(\"TLS Protocol Specified: {}\".format(TLSSTRENGTH))\n return context",
"def context():\n return Context(SSLv23_METHOD)",
"def create_tls_context_factory():\n\n # Load the server's private key, public X509 certificate, and the certificate authority's X509 certificate\n with open(Configuration.get('tls-ca-cert-location')) as ca_certificate_file:\n ca_certificate = ssl.Certificate.loadPEM(ca_certificate_file.read())\n with open(Configuration.get('tls-private-key-location')) as private_key_file:\n with open(Configuration.get('tls-public-cert-location')) as public_certificate_file:\n server_certificate = ssl.PrivateCertificate.loadPEM(private_key_file.read() + public_certificate_file.read())\n\n server_context_factory = server_certificate.options(ca_certificate)\n\n return server_context_factory",
"def _get_noverify_context(self):\n from twisted.internet.ssl import ClientContextFactory\n context_factory = ClientContextFactory()\n if self.ssl_method is not None:\n context_factory.method = self.ssl_method\n return context_factory.getContext()",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv3_METHOD)\n ctx.use_certificate_file(config.server.certificate)\n ctx.use_privatekey_file(config.server.private_key)\n ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n self.verify_certificate)\n # Since we have self-signed certs we have to explicitly\n # tell the server to trust them.\n ctx.load_verify_locations(config.server.root_ca)\n return ctx",
"def _create_ssl_request(self, url):\r\n request = self.factory.get(url)\r\n request.META['SSL_CLIENT_S_DN'] = self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)\r\n request.user = AnonymousUser()\r\n middleware = SessionMiddleware()\r\n middleware.process_request(request)\r\n request.session.save()\r\n MakoMiddleware().process_request(request)\r\n return request",
"def create_server_context(domain, container_path, context_path=None, use_ssl=True):\n server_context = dict(domain=domain, container_path=container_path, context_path=context_path)\n\n if use_ssl:\n scheme = 'https'\n else:\n scheme = 'http'\n scheme += '://'\n\n if use_ssl:\n session = requests.Session()\n session.mount(scheme, SafeTLSAdapter())\n else:\n # TODO: Is there a better way? Can we have session.mount('http')?\n session = requests\n\n server_context['scheme'] = scheme\n server_context['session'] = session\n\n return server_context",
"def new_context(self, host: str) -> ssl.SSLContext:\n\n # Generates cert/key for the host.\n cert, key = self.new_X509(host)\n\n # Dump the cert and key.\n cert_dump = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n key_dump = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)\n\n # Store cert and key into file. Unfortunately we need to store them in disk\n # because SSLContext does not support loading from memory. This is a limitation\n # of the Python standard library, and the community: https://bugs.python.org/issue16487\n # Alternatives cannot be used for this because this context is eventually used\n # by asyncio.get_event_loop().start_tls(..., sslcontext=..., ...) parameter,\n # which only support ssl.SSLContext. To mitigate this we use lru_cache to\n # cache the SSLContext for each host. It works fairly well, but its not the\n # preferred way to do it... loading from memory would be better.\n cert_path, key_path = __data__ / \"temp.crt\", __data__ / \"temp.key\"\n cert_path.parent.mkdir(parents=True, exist_ok=True)\n with cert_path.open(\"wb\") as file:\n file.write(cert_dump)\n key_path.parent.mkdir(parents=True, exist_ok=True)\n with key_path.open(\"wb\") as file:\n file.write(key_dump)\n\n # Creates new SSLContext.\n context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)\n context.load_cert_chain(certfile=cert_path, keyfile=key_path)\n\n # Remove the temporary files.\n cert_path.unlink()\n key_path.unlink()\n\n return context",
"def _default_ssl_context() -> ssl.SSLContext:\n ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n ssl_context.check_hostname = True\n ssl_context.load_default_certs()\n return ssl_context",
"def init_session(self):\n ssl_context = ssl.create_default_context(\n purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None,\n cadata=None)\n ssl_settings = {\"ssl_context\": ssl_context}\n self.session = iRODSSession(\n host=self.module.params[\"host\"],\n port=self.module.params[\"port\"],\n user=self.module.params[\"admin_user\"],\n password=self.module.params[\"admin_password\"],\n zone=self.module.params[\"zone\"],\n **ssl_settings)",
"def test_getContext(self):\n contextFactory = crypto.SSLVerifyingContextFactory(self.url)\n self.assertIsInstance(contextFactory.getContext(),\n OpenSSL.SSL.Context)",
"def createContextFactory(self):\n return ChainingOpenSSLContextFactory(\n config.SSLPrivateKey,\n config.SSLCertificate,\n certificateChainFile=config.SSLAuthorityChain,\n passwdCallback=getSSLPassphrase,\n keychainIdentity=config.SSLKeychainIdentity,\n sslmethod=getattr(OpenSSL.SSL, config.SSLMethod),\n ciphers=config.SSLCiphers.strip(),\n verifyClient=config.Authentication.ClientCertificate.Enabled,\n requireClientCertificate=config.Authentication.ClientCertificate.Required,\n clientCACertFileNames=config.Authentication.ClientCertificate.CAFiles,\n sendCAsToClient=config.Authentication.ClientCertificate.SendCAsToClient,\n )",
"def create_no_verify_ssl_context() -> ssl.SSLContext:\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)\n sslcontext.check_hostname = False\n sslcontext.verify_mode = ssl.CERT_NONE\n # Allow all ciphers rather than only Python 3.10 default\n sslcontext.set_ciphers(\"DEFAULT\")\n with contextlib.suppress(AttributeError):\n # This only works for OpenSSL >= 1.0.0\n sslcontext.options |= ssl.OP_NO_COMPRESSION\n sslcontext.set_default_verify_paths()\n # ssl.OP_LEGACY_SERVER_CONNECT is only available in Python 3.12a4+\n sslcontext.options |= getattr(ssl, \"OP_LEGACY_SERVER_CONNECT\", 0x4)\n return sslcontext",
"def ssl_context(self) -> SSLContext | None:\n if (\n self.security_protocol != KafkaSecurityProtocol.SSL\n or self.cluster_ca_path is None\n or self.client_cert_path is None\n or self.client_key_path is None\n ):\n return None\n\n client_cert_path = Path(self.client_cert_path)\n\n if self.client_ca_path is not None:\n # Need to contatenate the client cert and CA certificates. This is\n # typical for Strimzi-based Kafka clusters.\n if self.cert_temp_dir is None:\n raise RuntimeError(\n \"KAFKIT_KAFKA_CERT_TEMP_DIR must be set when \"\n \"a client CA certificate is provided.\"\n )\n client_ca = Path(self.client_ca_path).read_text()\n client_cert = Path(self.client_cert_path).read_text()\n sep = \"\" if client_ca.endswith(\"\\n\") else \"\\n\"\n new_client_cert = sep.join([client_cert, client_ca])\n new_client_cert_path = Path(self.cert_temp_dir) / \"client.crt\"\n new_client_cert_path.write_text(new_client_cert)\n client_cert_path = Path(new_client_cert_path)\n\n return create_ssl_context(\n cluster_ca_path=Path(self.cluster_ca_path),\n client_cert_path=client_cert_path,\n client_key_path=Path(self.client_key_path),\n )",
"async def test_simple_get_ssl_ctx(app, aiohttp_server, ssl_context):\n server = await aiohttp_server(app, ssl=ssl_context)\n url = \"https://localhost:%d\" % server.port\n\n ssl_context = ssl.create_default_context(\n ssl.Purpose.SERVER_AUTH,\n )\n ssl_context.check_hostname = False\n ssl_context.verify_mode = ssl.CERT_NONE\n async with aiosonic.HTTPClient() as client:\n res = await client.get(url, ssl=ssl_context)\n assert res.status_code == 200\n assert await res.text() == \"Hello, world\"\n await server.close()",
"def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE",
"def test_ssl_object_attributes(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n cipher = ssock.cipher()\n assert type(cipher) == tuple\n\n # No chosen protocol through ALPN or NPN.\n assert ssock.selected_alpn_protocol() is None\n assert ssock.selected_npn_protocol() is None\n\n shared_ciphers = ssock.shared_ciphers()\n # SSLContext.shared_ciphers() changed behavior completely in a patch version.\n # See: https://github.com/python/cpython/issues/96931\n assert shared_ciphers is None or (\n type(shared_ciphers) is list and len(shared_ciphers) > 0\n )\n\n assert ssock.compression() is None\n\n validate_peercert(ssock)\n\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)",
"def _load_ssl_certificate(self) -> ssl.SSLContext:\n\n sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)\n sslcontext.load_cert_chain(\n path.join(path.dirname(__file__), '..', '..', 'player.crt'),\n path.join(path.dirname(__file__), '..', '..', 'player.key')\n )\n\n return sslcontext",
"def test_get_context(self):\n context = Context(SSLv23_METHOD)\n connection = Connection(context, None)\n assert connection.get_context() is context",
"def getContext(self, hostname=None, port=None):\n ctx = super(SSLVerifyingContextFactory, self).getContext()\n store = ctx.get_cert_store()\n verifyOptions = OpenSSL.SSL.VERIFY_PEER\n ctx.set_verify(verifyOptions, self.verifyHostname)\n return ctx",
"def set_ssl_context(self, ssl_verify, ssl_cafile):\n if not ssl_verify:\n self.ssl_context = ssl.create_default_context()\n self.ssl_context.check_hostname = False\n self.ssl_context.verify_mode = ssl.CERT_NONE\n elif ssl_cafile:\n self.ssl_context = ssl.create_default_context(cafile=ssl_cafile)\n else:\n self.ssl_context = ssl.create_default_context()",
"def __init__(self, cert_string=None, cert_file=None, key_string=None, key_file=None, passphrase=None):\n self._context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n\n if cert_file:\n # we have to load certificate for equality check. there is no\n # other way to obtain certificate from context.\n with open(cert_file, 'rb') as fp:\n cert_string = fp.read()\n\n cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_string)\n self._context.use_certificate(cert)\n\n if not key_string and not key_file:\n # OpenSSL is smart enought to locate private key in certificate\n args = [OpenSSL.crypto.FILETYPE_PEM, cert_string]\n if passphrase is not None:\n args.append(passphrase)\n\n pk = OpenSSL.crypto.load_privatekey(*args)\n self._context.use_privatekey(pk)\n elif key_file and not passphrase:\n self._context.use_privatekey_file(key_file, OpenSSL.crypto.FILETYPE_PEM)\n\n else:\n if key_file:\n # key file is provided with passphrase. context.use_privatekey_file\n # does not use passphrase, so we have to load the key file manually.\n with open(key_file, 'rb') as fp:\n key_string = fp.read()\n\n args = [OpenSSL.crypto.FILETYPE_PEM, key_string]\n if passphrase is not None:\n args.append(passphrase)\n\n pk = OpenSSL.crypto.load_privatekey(*args)\n self._context.use_privatekey(pk)\n\n # check if we are not passed some garbage\n self._context.check_privatekey()\n\n # used to compare certificates.\n self._equality = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)",
"def _create_session(self) -> Session:\n session = Session()\n\n # Sets the client side and server side SSL cert verification, if provided as properties.\n if ssl_config := self.properties.get(SSL):\n if ssl_ca_bundle := ssl_config.get(CA_BUNDLE): # type: ignore\n session.verify = ssl_ca_bundle\n if ssl_client := ssl_config.get(CLIENT): # type: ignore\n if all(k in ssl_client for k in (CERT, KEY)):\n session.cert = (ssl_client[CERT], ssl_client[KEY])\n elif ssl_client_cert := ssl_client.get(CERT):\n session.cert = ssl_client_cert\n\n # If we have credentials, but not a token, we want to fetch a token\n if TOKEN not in self.properties and CREDENTIAL in self.properties:\n self.properties[TOKEN] = self._fetch_access_token(session, self.properties[CREDENTIAL])\n\n # Set Auth token for subsequent calls in the session\n if token := self.properties.get(TOKEN):\n session.headers[AUTHORIZATION_HEADER] = f\"{BEARER_PREFIX} {token}\"\n\n # Set HTTP headers\n session.headers[\"Content-type\"] = \"application/json\"\n session.headers[\"X-Client-Version\"] = ICEBERG_REST_SPEC_VERSION\n session.headers[\"User-Agent\"] = f\"PyIceberg/{__version__}\"\n\n # Configure SigV4 Request Signing\n if str(self.properties.get(SIGV4, False)).lower() == \"true\":\n self._init_sigv4(session)\n\n return session",
"def ssl_connect(host, port = DEFAULT_SERVER_SSL_PORT, keyfile = None, \n certfile = None, ca_certs = None, ssl_version = None):\n return factory.ssl_connect(host, port, keyfile = keyfile, certfile = certfile,\n ssl_version = ssl_version, ca_certs = ca_certs, service = SlaveService)",
"def _create_socket_context(self):\n # Find upper bound on ACTIME from constants and set timeout to double\n # that\n timeout = int(2000 * self.p_constants[\"ACTIME_UPPER\"])\n\n context = zmq.Context() # Create Context\n socket = context.socket(zmq.REQ) # Create socket\n # Connect to dining philosophers\n socket.connect(self.p_constants[\"SERV_ADDR\"])\n socket.RCVTIMEO = timeout # Set timeout\n\n return context, socket",
"def ssl_wrap_socket(socket, ssl_options, server_hostname=..., **kwargs):\n ...",
"def __init__(self, query, sock_timeout=3, **kw):\n if not session.session or not session.session.cookies:\n raise SessionNotFound('No SMC session found. You must first '\n 'obtain an SMC session through session.login before making '\n 'a web socket connection.')\n \n sslopt = {}\n if session.is_ssl:\n # SSL verification is based on the session settings since the\n # session must be made before calling this class. If verify=True, \n # try to get the CA bundle from certifi if the package exists\n # Set check_hostname to False because python ssl doesn't appear\n # to validate the subjectAltName properly, however requests does\n # and would have already validated this when the session was set\n # up. This can still be overridden by setting check_hostname=True.\n sslopt.update(\n cert_reqs=ssl.CERT_NONE,\n check_hostname=False)\n \n certfile = session.session.verify\n if certfile:\n if isinstance(certfile, bool): # verify=True\n certfile = _get_ca_bundle()\n if certfile is None:\n certfile = ''\n \n sslopt.update(\n cert_reqs=kw.pop('cert_reqs', ssl.CERT_REQUIRED),\n check_hostname=kw.pop('check_hostname', False))\n \n if sslopt.get('cert_reqs') != ssl.CERT_NONE:\n os.environ['WEBSOCKET_CLIENT_CA_BUNDLE'] = certfile\n \n # Enable multithread locking\n if 'enable_multithread' not in kw:\n kw.update(enable_multithread=True)\n \n # Max number of receives, configurable for batching\n self.max_recv = kw.pop('max_recv', 0)\n \n super(SMCSocketProtocol, self).__init__(sslopt=sslopt, **kw)\n \n self.query = query\n self.fetch_id = None\n # Inner thread used to keep socket select alive\n self.thread = None\n self.event = threading.Event()\n self.sock_timeout = sock_timeout",
"def create_listening_ssl_socket(address, certfile, keyfile):\r\n # check if 2 files exist. If not, raise exceptions\r\n if os.path.isfile(certfile) and os.path.isfile(keyfile):\r\n ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\r\n ssl_context.options |= (\r\n ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION\r\n )\r\n ssl_context.set_ciphers(\"ECDHE+AESGCM\")\r\n ssl_context.load_cert_chain(certfile=certfile, keyfile=keyfile)\r\n ssl_context.set_alpn_protocols([\"h2\"])\r\n\r\n sock = socket.socket()\r\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n csock = ssl_context.wrap_socket(sock)\r\n while True:\r\n try:\r\n csock.send(None)\r\n except StopIteration as e:\r\n sock = e.value\r\n sock.bind(address)\r\n sock.listen()\r\n return sock\r\n else:\r\n raise FileNotFoundError(certfile + \" and/or \" + keyfile + \" don't exist. HTTP/2 needs certificate files.\")",
"def __init__(\n self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True\n ):\n self.incoming = ssl.MemoryBIO()\n self.outgoing = ssl.MemoryBIO()\n\n self.suppress_ragged_eofs = suppress_ragged_eofs\n self.socket = socket\n\n self.sslobj = ssl_context.wrap_bio(\n self.incoming, self.outgoing, server_hostname=server_hostname\n )\n\n # Perform initial handshake.\n self._ssl_io_loop(self.sslobj.do_handshake)",
"def ConnectSSL(self):\n with open(self.DEFAULT_CLIENT_KEY_FILE, 'rb') as f:\n private_key = f.read()\n with open(self.DEFAULT_CLIENT_CHAIN_FILE, 'rb') as f:\n certificate_chain = f.read()\n with open(self.DEFAULT_ROOT_CERT_FILE, 'rb') as f:\n root_ca = f.read()\n credentials = grpc.ssl_channel_credentials(root_certificates=root_ca, private_key=private_key, certificate_chain=certificate_chain)\n self.channel = grpc.secure_channel(self.address, credentials)\n self._setup()",
"def __init__(self, context, **kwargs):\n suds.transport.http.HttpTransport.__init__(self, **kwargs)\n self.ssl_context = context\n self.verify = (context and context.verify_mode != ssl.CERT_NONE)",
"def create_server_certs_enc():\n global server_keystore, config\n\n same_enc_sign_cert = config[\"config\"][\"same_enc_sign_cert\"]\n if same_enc_sign_cert:\n dn = \"/CN=server certificate RSA\"\n else:\n dn = \"/CN=server certificate encryption RSA\"\n key_pair_rsa = create_csr(dn)\n server_keystore[\"key\"] = key_pair_rsa[\"key\"]\n san = [f'URI.1 = {uuid.uuid4().urn}']\n server_keystore[\"crt\"] = sign_csr(key_pair_rsa[\"pub\"], dn, san)",
"def fetch_x509_context(self) -> X509Context:",
"def startSSL(self, ssl_options={}):\n if self.ssl_enabled:\n raise RuntimeError(\"startSSL() called on SSL-enabled %r.\" % self)\n\n if self._closed:\n raise RuntimeError(\"startSSL() called on closed %r.\" % self)\n\n if ssl_options.setdefault(\"server_side\", True) is not True:\n raise ValueError(\"SSL option 'server_side' must be True.\")\n\n if ssl_options.setdefault(\"do_handshake_on_connect\", False) is not False:\n raise ValueError(\"SSL option 'do_handshake_on_connect' must be False.\")\n\n self.ssl_enabled = True\n self._ssl_options = ssl_options\n\n return self",
"def session_setup(opts: Dict[Any, Any]) -> Any: #TODO\n stype = ''\n if 'serverca' in opts and 'cert' in opts:\n stype = 'ssl'\n s = session.get(stype, **opts)\n if s is None:\n raise errors.KojiError('Unable to idenify authentication type.')\n s.login()\n if not s.is_ok():\n raise errors.AuthError('Unable to validate session')\n return s",
"def get_ssl_certificate():",
"def get_ssl_certificate() :",
"def setupBaseSecurityContext(self):\n\n resp = gss.initSecContext(self.service_name,\n flags=self.flags,\n mech_type=self.mech_type,\n ttl=self.ttl)\n\n (self.ctx, _, _, self.token, self.last_ttl, _) = resp\n return self.token",
"def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))",
"async def _mk_http_connection(self) -> ClientSession:\n if self._ssl_context is not None:\n connector = TCPConnector(ssl=self._ssl_context)\n base_url = f'https://{self._netloc}/'\n else:\n connector = TCPConnector()\n base_url = f'http://{self._netloc}/'\n\n return ClientSession(base_url, connector=connector, timeout=ClientTimeout(self._socket_timeout))",
"def __init__(self,\n session: Optional[ClientSession] = None,\n endpoint: str = 'https://127.0.0.1:28183',\n ssl_verify: str = '/home/user/.joinmarket/ssl') -> None:\n\n self._id_count = 0\n if session:\n self._session = session\n else:\n ssl = create_default_context(cafile=f'{ssl_verify}/cert.pem')\n self._session = ClientSession(json_serialize=dumps,\n headers=HEADERS,\n connector=TCPConnector(ssl=ssl),\n timeout=ClientTimeout(total=15))\n self._endpoint = endpoint\n self._ws: Optional[ClientWebSocketResponse] = None",
"def __init__(self, id):\n self.id = id\n ctx = SSL.Context(SSL.SSLv23_METHOD)\n # TODO: Make the file names configurable.\n try:\n ctx.use_certificate_file('player-%d.cert' % id)\n ctx.use_privatekey_file('player-%d.key' % id)\n ctx.check_privatekey()\n ctx.load_verify_locations('ca.cert')\n ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,\n lambda conn, cert, errnum, depth, ok: ok)\n self.ctx = ctx\n except SSL.Error, e:\n print \"SSL errors - did you forget to generate certificates?\"\n for (lib, func, reason) in e.args[0]:\n print \"* %s in %s: %s\" % (func, lib, reason)\n raise SystemExit(\"Stopping program\")",
"def test_ssl_env( # noqa: C901 # FIXME\n thread_exceptions,\n recwarn,\n mocker,\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, tls_verify_mode, tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n use_client_cert,\n):\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n\n with mocker.mock_module.patch(\n 'idna.core.ulabel',\n return_value=ntob('127.0.0.1'),\n ):\n client_cert = ca.issue_cert(ntou('127.0.0.1'))\n\n with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n tls_adapter.context.set_verify(\n _stdlib_to_openssl_verify[tls_verify_mode],\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n else:\n tls_adapter.context.verify_mode = tls_verify_mode\n\n ca.configure_trust(tls_adapter.context)\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlswsgiserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)\n\n resp = requests.get(\n 'https://' + interface + ':' + str(port) + '/env',\n timeout=http_request_timeout,\n verify=tls_ca_certificate_pem_path,\n cert=cl_pem if use_client_cert else None,\n )\n\n env = json.loads(resp.content.decode('utf-8'))\n\n # hard coded env\n assert env['wsgi.url_scheme'] == 'https'\n assert env['HTTPS'] == 'on'\n\n # ensure these are present\n for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:\n assert key in env\n\n # pyOpenSSL generates the env before the handshake completes\n if adapter_type == 'pyopenssl':\n return\n\n for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:\n assert key in env\n\n # client certificate env\n if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:\n assert env['SSL_CLIENT_VERIFY'] == 'NONE'\n else:\n assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'\n\n with open(cl_pem, 'rt') as f:\n assert env['SSL_CLIENT_CERT'] in f.read()\n\n for key in {\n 'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',\n 'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',\n }:\n assert key in env\n\n # builtin ssl environment generation may use a loopback socket\n # ensure no ResourceWarning was raised during the test\n if IS_PYPY:\n # NOTE: PyPy doesn't have ResourceWarning\n # Ref: https://doc.pypy.org/en/latest/cpython_differences.html\n return\n for warn in recwarn:\n if not issubclass(warn.category, ResourceWarning):\n continue\n\n # the tests can sporadically generate resource warnings\n # due to timing issues\n # all of these sporadic warnings appear to be about socket.socket\n # and have been observed to come from requests connection pool\n msg = str(warn.message)\n if 'socket.socket' in msg:\n pytest.xfail(\n '\\n'.join((\n 'Sometimes this test fails due to '\n 'a socket.socket ResourceWarning:',\n msg,\n )),\n )\n pytest.fail(msg)\n\n # to perform the ssl handshake over that loopback socket,\n # the builtin ssl environment generation uses a thread\n for _, _, trace in thread_exceptions:\n print(trace, file=sys.stderr)\n assert not thread_exceptions, ': '.join((\n thread_exceptions[0][0].__name__,\n thread_exceptions[0][1],\n ))",
"def init_server_state(config, publish_socket):\n cert_store = _init_cert_store(config)\n interceptor = _init_interceptor(config, publish_socket)\n\n return ServerContext(\n config=config, interceptor=interceptor, cert_store=cert_store)",
"def signed_session(self, session=None):\n\n if session:\n session = super(ClientCertAuthentication, self).signed_session(session)\n else:\n session = super(ClientCertAuthentication, self).signed_session()\n\n if self.cert is not None:\n session.cert = self.cert\n if self.ca_cert is not None:\n session.verify = self.ca_cert\n if self.no_verify:\n session.verify = False\n\n return session",
"def get_tls_factory(self):\n if not access(self.cert_path, R_OK):\n raise RuntimeError('Error: cert file at %s is not '\n 'readable' % self.cert_path)\n if not access(self.key_path, R_OK):\n raise RuntimeError('Error: key file at %s is not '\n 'readable' % self.key_path)\n if not HAVE_PYOPENSSL:\n raise RuntimeError('Error: running with TLS (cert and key) requires'\n ' pyOpenSSL, but it does not appear to be '\n 'installed. Please \"pip install pyOpenSSL\".')\n # check certs are readable\n cf = certificateOptionsFromFiles(self.key_path, self.cert_path)\n return cf",
"def _client(self, sock):\n # Now create the client side Connection. Similar boilerplate to the\n # above.\n client_ctx = Context(SSLv23_METHOD)\n client_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)\n client_ctx.set_verify(\n VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,\n verify_cb,\n )\n client_store = client_ctx.get_cert_store()\n client_ctx.use_privatekey(\n load_privatekey(FILETYPE_PEM, client_key_pem)\n )\n client_ctx.use_certificate(\n load_certificate(FILETYPE_PEM, client_cert_pem)\n )\n client_ctx.check_privatekey()\n client_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))\n client_conn = Connection(client_ctx, sock)\n client_conn.set_connect_state()\n return client_conn",
"def _client_session(self, data):\n self._check_ca_certificate()\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((\"127.0.0.1\", self._app_port))\n secure_socket = ssl.wrap_socket(\n client_socket,\n cert_reqs=ssl.CERT_REQUIRED,\n ssl_version=ssl.PROTOCOL_TLSv1_2,\n ca_certs=self._ca_certificate_path,\n ciphers=self._ciphers)\n\n secure_socket.write(str.encode(data))\n secure_socket.close()\n client_socket.close()",
"def _server_connection(self, callback, data):\n ctx = Context(SSLv23_METHOD)\n ctx.use_privatekey(load_privatekey(FILETYPE_PEM, server_key_pem))\n ctx.use_certificate(load_certificate(FILETYPE_PEM, server_cert_pem))\n ctx.set_ocsp_server_callback(callback, data)\n server = Connection(ctx)\n server.set_accept_state()\n return server",
"def startTLSAndAssertSession(self):\n success = []\n self.connected.addCallback(strip(self.client.startTLS))\n def checkSecure(ignored):\n self.assertTrue(\n interfaces.ISSLTransport.providedBy(self.client.transport))\n self.connected.addCallback(checkSecure)\n self.connected.addCallback(success.append)\n\n d = self.loopback()\n d.addCallback(lambda x : self.assertTrue(success))\n return defer.gatherResults([d, self.connected])",
"def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None,\n cert_reqs=None, ssl_version=None, ciphers=None,\n service=VoidService, config={}, ipv6=False, keepalive=False, verify_mode=None):\n ssl_kwargs = {\"server_side\": False}\n if keyfile is not None:\n ssl_kwargs[\"keyfile\"] = keyfile\n if certfile is not None:\n ssl_kwargs[\"certfile\"] = certfile\n if verify_mode is not None:\n ssl_kwargs[\"cert_reqs\"] = verify_mode\n else:\n ssl_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n if ca_certs is not None:\n ssl_kwargs[\"ca_certs\"] = ca_certs\n ssl_kwargs[\"cert_reqs\"] = ssl.CERT_REQUIRED\n if cert_reqs is not None:\n ssl_kwargs[\"cert_reqs\"] = cert_reqs\n elif cert_reqs != ssl.CERT_NONE:\n ssl_kwargs[\"check_hostname\"] = False\n if ssl_version is not None:\n ssl_kwargs[\"ssl_version\"] = ssl_version\n if ciphers is not None:\n ssl_kwargs[\"ciphers\"] = ciphers\n s = SocketStream.ssl_connect(host, port, ssl_kwargs, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)",
"def start(self):\n if self._use_ssl:\n try:\n ca_file = CONF.ssl_ca_file\n cert_file = CONF.ssl_cert_file\n key_file = CONF.ssl_key_file\n\n if cert_file and not os.path.exists(cert_file):\n raise RuntimeError(\n _(\"Unable to find cert_file : %s\") % cert_file)\n\n if ca_file and not os.path.exists(ca_file):\n raise RuntimeError(\n _(\"Unable to find ca_file : %s\") % ca_file)\n\n if key_file and not os.path.exists(key_file):\n raise RuntimeError(\n _(\"Unable to find key_file : %s\") % key_file)\n\n if self._use_ssl and (not cert_file or not key_file):\n raise RuntimeError(\n _(\"When running server in SSL mode, you must \"\n \"specify both a cert_file and key_file \"\n \"option value in your configuration file\"))\n ssl_kwargs = {\n 'server_side': True,\n 'certfile': cert_file,\n 'keyfile': key_file,\n 'cert_reqs': ssl.CERT_NONE,\n }\n\n if CONF.ssl_ca_file:\n ssl_kwargs['ca_certs'] = ca_file\n ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED\n\n self._socket = eventlet.wrap_ssl(self._socket,\n **ssl_kwargs)\n\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_REUSEADDR, 1)\n # sockets can hang around forever without keepalive\n self._socket.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE, 1)\n\n # This option isn't available in the OS X version of eventlet\n if hasattr(socket, 'TCP_KEEPIDLE'):\n self._socket.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n CONF.tcp_keepidle)\n\n except Exception:\n with excutils.save_and_reraise_exception():\n LOG.error(_(\"Failed to start %(name)s on %(host)s\"\n \":%(port)s with SSL support\") % self.__dict__)\n\n wsgi_kwargs = {\n 'func': eventlet.wsgi.server,\n 'sock': self._socket,\n 'site': self.app,\n 'protocol': self._protocol,\n 'custom_pool': self._pool,\n 'log': self._wsgi_logger,\n 'log_format': CONF.wsgi_log_format\n }\n\n if self._max_url_len:\n wsgi_kwargs['url_length_limit'] = self._max_url_len\n\n self._server = eventlet.spawn(**wsgi_kwargs)",
"def set_ssl(self):\n for params in self.config.get_ssl_params():\n self.connection.transport.set_ssl(**params)",
"def test_set_context(self):\n original = Context(SSLv23_METHOD)\n replacement = Context(SSLv23_METHOD)\n connection = Connection(original, None)\n connection.set_context(replacement)\n assert replacement is connection.get_context()\n # Lose our references to the contexts, just in case the Connection\n # isn't properly managing its own contributions to their reference\n # counts.\n del original, replacement\n collect()",
"async def set_ssl_context(self, ssl_context: Optional[SSLContext]) -> None:\n self.ssl_context = ssl_context\n\n if ssl_context is None:\n _LOGGER.debug(\"Disabling SSL for event listener server\")\n else:\n _LOGGER.debug(\"Enabling SSL for event listener server\")\n\n if self._server:\n self._server.stop()\n await self._start_server()",
"def create_session(obj):\n session = requests.Session()\n if obj.user is not None and obj.password is not None:\n session.auth = (obj.user, obj.password)\n\n # Proxy setup\n if obj.proxy is not None:\n proxy = '%s://%s:%s' % (translate_proxy_scheme(obj.proxy_type),\n obj.proxy_host, obj.proxy_port)\n session.proxies = {'http': proxy, 'https': proxy}\n\n # Emulate curl's way of handling SSL\n if obj.cainfo is not None:\n # CA certificates\n session.verify = obj.cainfo\n if obj.sslcert is not None:\n # Client certificate\n session.cert = obj.sslcert\n if obj.verifypeer is not None and not obj.verifypeer:\n # Disable certificate validation\n session.verify = False\n if obj.verifyhost is not None and not obj.verifyhost:\n # Check the certificate, but do not verify that the hostname matches it.\n session.mount('https://', HostNameIgnoringAdapter())\n else:\n # Setup the retry strategy\n session.mount('https://', HTTPAdapter(max_retries=retries))\n # setup retry strategy for http connections\n session.mount('http://', HTTPAdapter(max_retries=retries))\n\n return session",
"def SpoofSSL(self, request, connection):\n self.log.debug('Entering SpoofSSL')\n target_host = request.GetTargetHost()\n\n self.log.debug('target_host: %s:%s' % target_host)\n\n context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)\n\n if not self.cert:\n raise ValueError, 'self.cert not defined: Can not spoof SSL without cert'\n\n context.use_privatekey_file(self.cert)\n context.use_certificate_file(self.cert)\n\n self.log.debug('SSL context built')\n self.log.debug('Sending HTTP 200 OK to client')\n\n connection.sendall('HTTP/1.0 200 OK Connected\\r\\n\\r\\n')\n\n ssl_connection = OpenSSL.SSL.Connection(context, connection)\n ssl_connection.set_accept_state()\n self.log.debug('Select(ing) on connection socket')\n select.select([connection], [], [])\n self.log.debug('SSL calling do_handshake()')\n ssl_connection.do_handshake()\n self.log.debug('SSL do_handshake() completed')\n\n ssl_connection.state_string()\n\n self.log.debug('Building SSL fileobjects')\n new_connection_write = socket._fileobject(ssl_connection, 'w')\n new_connection_read = socket._fileobject(ssl_connection, 'r')\n new_connection = socket._fileobject(ssl_connection)\n self.log.debug('Done building SSL fileobjects')\n\n self.connection = ssl_connection\n self.wfile = new_connection_write\n self.rfile = new_connection_read\n\n return True",
"def __init__(self):\n super(BasicAuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1\n self._ciphers = ':'.join((\n 'AES128-SHA',\n 'DES-CBC3-SHA',\n 'AES256-SHA',\n 'DHE-DSS-DES-CBC3-SHA',\n 'DHE-RSA-DES-CBC3-SHA',\n 'DH-DSS-AES128-SHA',\n 'DH-RSA-AES128-SHA',\n 'DHE-DSS-AES128-SHA',\n 'DHE-RSA-AES128-SHA',\n 'DH-RSA-AES256-SHA',\n 'DHE-DSS-AES256-SHA',\n 'DHE-RSA-AES256-SHA',\n ))",
"def _server(self, sock):\n # Create the server side Connection. This is mostly setup boilerplate\n # - use TLSv1, use a particular certificate, etc.\n server_ctx = Context(SSLv23_METHOD)\n server_ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3 | OP_SINGLE_DH_USE)\n server_ctx.set_verify(\n VERIFY_PEER | VERIFY_FAIL_IF_NO_PEER_CERT | VERIFY_CLIENT_ONCE,\n verify_cb,\n )\n server_store = server_ctx.get_cert_store()\n server_ctx.use_privatekey(\n load_privatekey(FILETYPE_PEM, server_key_pem)\n )\n server_ctx.use_certificate(\n load_certificate(FILETYPE_PEM, server_cert_pem)\n )\n server_ctx.check_privatekey()\n server_store.add_cert(load_certificate(FILETYPE_PEM, root_cert_pem))\n # Here the Connection is actually created. If None is passed as the\n # 2nd parameter, it indicates a memory BIO should be created.\n server_conn = Connection(server_ctx, sock)\n server_conn.set_accept_state()\n return server_conn",
"def create_ssl_cert_request ( ssl_hostnames ) :\n first_hostname = ssl_hostnames[ 0 ]\n csr_filename = get_ssl_csr_filename( first_hostname )\n key_filename = get_ssl_key_filename( first_hostname )\n openssl_cnf = \"\"\"\n[req]\ndistinguished_name = req_distinguished_name\nreq_extensions = san_ext\n\n[req_distinguished_name]\ncountryName_default = US\nstateOrProvinceName_default = New York\nlocalityName_default = New York\norganizationalUnitName_default = Home Box Office, Inc\ncommonName_default = \"\"\" + first_hostname + \"\"\"\n\n[san_ext]\nbasicConstraints = CA:FALSE\nkeyUsage = nonRepudiation, digitalSignature, keyEncipherment\nsubjectAltName = @sans\n\n[sans]\n\"\"\"\n counter = 0\n for hostname in ssl_hostnames :\n counter += 1\n openssl_cnf += 'DNS.' + str( counter ) + ' = ' + hostname + '\\n'\n\n with open( first_hostname, 'w' ) as f :\n f.write( openssl_cnf )\n cmd = 'openssl req -new -newkey rsa:2048 -nodes -out ' + csr_filename + ' -keyout ' + key_filename\n cmd += ' -config ' + first_hostname + ' -subj \"/C=US/ST=New York/L=New York/O=Home Box Office Inc/CN=' + first_hostname + '\"'\n keygen = subprocess.call( cmd, shell = True )\n os.remove( first_hostname )\n if keygen != 0 :\n print \"Generation of SSL request failed!\"\n return None\n\n return { 'csr-filename' : csr_filename, 'key-filename' : key_filename }",
"def wrap_socket(sock, server_side=False, keyfile=None, certfile=None, cert_reqs=CERT_NONE, ca_certs=None):\n ...",
"def __init__(self, url, **kwargs):\n self.hostname = self.getHostnameFromURL(url)\n\n # ``verify`` here refers to server-side verification of certificates\n # presented by a client:\n self.verify = False if self.isClient else True\n super(SSLVerifyingContextFactory, self).__init__(verify=self.verify,\n fixBrokenPeers=True,\n **kwargs)",
"def __init__(self, tls_1_2=None, tls_1_1=None, tls_1_0=None, ssl_3_0=None):\n self.tls_1_2 = tls_1_2\n self.tls_1_1 = tls_1_1\n self.tls_1_0 = tls_1_0\n self.ssl_3_0 = ssl_3_0",
"def create_server_certs():\n global server_key_files, server_keystore, config\n\n same_enc_sign_cert = config[\"config\"][\"same_enc_sign_cert\"]\n if not Path(server_key_files[\"key\"]).is_file() or not Path(server_key_files[\"crt\"]).is_file():\n print(\"create new encryption cert\\n\")\n create_server_certs_enc()\n for f_item in [\"key\", \"crt\"]:\n with open(server_key_files[f_item], \"w\") as f:\n f.write(server_keystore[f_item])\n f.close()\n else:\n for f_item in [\"key\", \"crt\"]:\n with open(server_key_files[f_item], \"r\") as f:\n server_keystore[f_item] = f.read()\n f.close()\n\n server_keystore[\"key-sign\"] = server_keystore[\"key\"]\n server_keystore[\"crt-sign\"] = server_keystore[\"crt\"]\n\n if not Path(server_key_files[\"key-sign\"]).is_file() or not Path(server_key_files[\"crt-sign\"]).is_file():\n print(\"create new signing cert\\n\")\n if not same_enc_sign_cert:\n create_server_certs_sign()\n for f_item in [\"key-sign\", \"crt-sign\"]:\n with open(server_key_files[f_item], \"w\") as f:\n f.write(server_keystore[f_item])\n f.close()\n else:\n for f_item in [\"key-sign\", \"crt-sign\"]:\n with open(server_key_files[f_item], \"r\") as f:\n server_keystore[f_item] = f.read()\n f.close()",
"def build_session():\n return requests.Session()",
"def open_https(self, url, data=None, ssl_context=None):\n # type: (AnyStr, Optional[bytes], Optional[SSL.Context]) -> addinfourl\n if ssl_context is not None and isinstance(ssl_context, SSL.Context):\n self.ctx = ssl_context\n else:\n self.ctx = SSL.Context()\n user_passwd = None\n if isinstance(url, six.string_types):\n try: # python 2\n # http://pydoc.org/2.5.1/urllib.html\n host, selector = splithost(url)\n if host:\n user_passwd, host = splituser(host)\n host = unquote(host)\n realhost = host\n except NameError: # python 3 has no splithost\n # https://docs.python.org/3/library/urllib.parse.html\n parsed = urlparse(url)\n host = parsed.hostname\n if parsed.port:\n host += \":{0}\".format(parsed.port)\n user_passwd = parsed.username\n if parsed.password:\n user_passwd += \":{0}\".format(parsed.password)\n selector = parsed.path\n else:\n host, selector = url\n urltype, rest = splittype(selector)\n url = rest\n user_passwd = None\n if urltype.lower() != 'http':\n realhost = None\n else:\n try: # python 2\n realhost, rest = splithost(rest)\n if realhost:\n user_passwd, realhost = splituser(realhost)\n if user_passwd:\n selector = \"%s://%s%s\" % (urltype, realhost, rest)\n except NameError: # python 3 has no splithost\n parsed = urlparse(rest)\n host = parsed.hostname\n if parsed.port:\n host += \":{0}\".format(parsed.port)\n user_passwd = parsed.username\n if parsed.password:\n user_passwd += \":{0}\".format(parsed.password)\n # print(\"proxy via http:\", host, selector)\n if not host:\n raise IOError('http error', 'no host given')\n if user_passwd:\n import base64\n auth = base64.encodestring(user_passwd).strip()\n else:\n auth = None\n # Start here!\n h = httpslib.HTTPSConnection(host=host, ssl_context=self.ctx)\n # h.set_debuglevel(1)\n # Stop here!\n if data is not None:\n h.putrequest('POST', selector)\n h.putheader('Content-type', 'application/x-www-form-urlencoded')\n h.putheader('Content-length', '%d' % len(data))\n else:\n h.putrequest('GET', selector)\n if auth:\n h.putheader('Authorization', 'Basic %s' % auth)\n for args in self.addheaders:\n h.putheader(*args) # for python3 - used to use apply\n h.endheaders()\n if data is not None:\n h.send(data + '\\r\\n')\n # Here again!\n resp = h.getresponse()\n fp = resp.fp\n return addinfourl(fp, resp.msg, \"https:\" + url)\n # Stop again.",
"def __init__(self, enable=None, ssl_enable=None, port=None, ssl_port=None):\n self.enable = enable\n self.ssl_enable = ssl_enable\n self.port = port\n self.ssl_port = ssl_port",
"def is_secure_context(self):\n raise exceptions.NotImplementedError()",
"async def test_simple_get_ssl(app, aiohttp_server, ssl_context):\n server = await aiohttp_server(app, ssl=ssl_context)\n url = \"https://localhost:%d\" % server.port\n\n async with aiosonic.HTTPClient() as client:\n res = await client.get(url, verify=False)\n assert res.status_code == 200\n assert await res.text() == \"Hello, world\"\n await server.close()",
"def server(addr, port, family, use_tls=False):\n sock = _socket(addr, port, family)\n ctx = _context(use_tls=use_tls)\n return {\"sock\": sock, \"ssl\": ctx}",
"def get_request(\n self,\n ) -> typing.Tuple[ssl.SSLSocket, typing.Tuple[str, int]]:\n socket, addr = self.socket.accept()\n stream = ssl.wrap_socket(\n socket,\n server_side=True,\n keyfile=self.keyfile,\n certfile=self.certfile,\n ssl_version=self.ssl_version,\n )\n return stream, addr",
"async def _open_connection_https(self, location):\n sock = await connect_tcp(\n location[0],\n location[1],\n ssl_context=self.ssl_context,\n local_host=self.source_address,\n tls=True,\n tls_standard_compatible=False,\n )\n sock._active = True\n return sock",
"def ssl(self) -> Optional[pulumi.Input['SslConfigurationArgs']]:\n return pulumi.get(self, \"ssl\")",
"def create_server_certs_sign():\n global server_keystore\n\n dn_sign = \"/CN=server certificate sign RSA-PSS\"\n key_pair_rsa_sign = create_csr_pss(dn_sign)\n server_keystore[\"key-sign\"] = key_pair_rsa_sign[\"key\"]\n san = [f'URI.1 = {uuid.uuid4().urn}']\n server_keystore[\"crt-sign\"] = sign_csr(key_pair_rsa_sign[\"pub\"], dn_sign, san)",
"def test_get_cert_store(self):\n context = Context(SSLv23_METHOD)\n store = context.get_cert_store()\n assert isinstance(store, X509Store)",
"def _open_connection(self):\n ssl_context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=None, capath=None, cadata=None)\n ssl_settings = {'ssl_context': ssl_context}\n try:\n session =iRODSSession(host=self.config['irods_host'], port=self.config['irods_port'],\n user=self.config['irods_user_name'], password=self.passwd, zone=self.config['irods_zone_name'],**ssl_settings)\n utilities.log.info('Opened an irods connection')\n except Exception as ex:\n utilities.log.info('Could not start a connection to irods config at {}'.format(config))\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n utilities.log.info('IRODS open: {}'.format(message))\n sys.exit(1)\n return session",
"def test_client_set_session(self):\n key = load_privatekey(FILETYPE_PEM, server_key_pem)\n cert = load_certificate(FILETYPE_PEM, server_cert_pem)\n ctx = Context(TLSv1_2_METHOD)\n ctx.use_privatekey(key)\n ctx.use_certificate(cert)\n ctx.set_session_id(b\"unity-test\")\n\n def makeServer(socket):\n server = Connection(ctx, socket)\n server.set_accept_state()\n return server\n\n originalServer, originalClient = loopback(server_factory=makeServer)\n originalSession = originalClient.get_session()\n\n def makeClient(socket):\n client = loopback_client_factory(socket)\n client.set_session(originalSession)\n return client\n\n resumedServer, resumedClient = loopback(\n server_factory=makeServer, client_factory=makeClient\n )\n\n # This is a proxy: in general, we have no access to any unique\n # identifier for the session (new enough versions of OpenSSL expose\n # a hash which could be usable, but \"new enough\" is very, very new).\n # Instead, exploit the fact that the master key is re-used if the\n # session is re-used. As long as the master key for the two\n # connections is the same, the session was re-used!\n assert originalServer.master_key() == resumedServer.master_key()",
"def _client_connection(self, callback, data, request_ocsp=True):\n ctx = Context(SSLv23_METHOD)\n ctx.set_ocsp_client_callback(callback, data)\n client = Connection(ctx)\n\n if request_ocsp:\n client.request_ocsp()\n\n client.set_connect_state()\n return client",
"def svn_client_create_context(svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def tls_http_server(request):\n return functools.partial(make_tls_http_server, request=request)",
"def ussl.wrap_socket(sock, server_side=False, keyfile=None, certfile=None, cert_reqs=CERT_NONE, ca_certs=None):\n pass",
"def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )",
"def cc(self):\n # TODO: The CartoContext documentaton says that SSL must be disabled sometimes if an on\n # premise host is used.\n # We are not taking this into account. It would need to create a requests.Session()\n # object, set its SSL to false and pass it to the CartoContext init.\n if self._carto_context is None:\n self._carto_context = cartoframes.CartoContext(\n base_url=self.base_url, api_key=self.api_key\n )\n return self._carto_context",
"def sim_session_resumption(sim, with_ticket = False):\n\n client_ctx = sim.client_ssl_context()\n server_ctx = sim.server_ssl_context()\n client_ctx.set_cipher_list('AES256-SHA256')\n\n if with_ticket:\n server_ctx.set_session_cache_mode(SSL.SESS_CACHE_OFF)\n label = 'ssl_session_ticket'\n else:\n server_ctx.set_options(SSL.OP_NO_TICKET)\n label = 'ssl_session_id'\n\n c1, s1 = sim.ssl_connection(client_ctx, server_ctx)\n\n res1 = xsim.simple_ssl_conversation(\n c1,\n s1,\n xsim.http_conversation_with_label(label + '/create'))\n\n c2, s2 = sim.ssl_connection(client_ctx, server_ctx)\n\n c2.set_session(res1[xsim.SSL_SESSION_KEY])\n\n xsim.simple_ssl_conversation(\n c2,\n s2,\n xsim.http_conversation_with_label(label + '/resume'))",
"def get_koji_session(self, ssl = False):\n if self._kojisession_with_ssl and ssl:\n return self._kojisession_with_ssl\n\n hub = self.config.get('koji','hub')\n kojisession = koji.ClientSession(hub)\n if ssl:\n clientcert = self.config.get('koji','clientcert')\n clientca = self.config.get('koji','clientca')\n serverca = self.config.get('koji','serverca')\n kojisession.ssl_login(clientcert, clientca, serverca)\n self._kojisession_with_ssl = kojisession\n return kojisession",
"def make_session():\n import aiohttp\n conn = aiohttp.TCPConnector(limit_per_host=int(\n os.getenv('AIO_CONN_LIMIT', 10)))\n timeout = aiohttp.ClientTimeout(\n total=int(os.getenv('AIO_TOTAL_TIMEOUT', 80)),\n connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n sock_read=int(os.getenv('AOI_READ_TIMEOUT', 30)),\n sock_connect=int(os.getenv('AIO_CONN_TIMEOUT', 15)),\n )\n s = aiohttp.ClientSession(connector=conn, timeout=timeout)\n return s",
"def _get_socket(self, secure=False, timeout=None):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(timeout or self.timeout)\n\n if secure:\n # Setting Purpose to CLIENT_AUTH might seem a bit backwards. But\n # SOS Access v4 is using SSL/TLS for encryption not authentications\n # and verification. There is no cert and no hostname to check so\n # setting the purpose to Client Auth diables that in a nifty way.\n self.context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n return self.context.wrap_socket(sock)\n\n else:\n return sock",
"def __init__(self, event_loop: asyncio.AbstractEventLoop, ssl_context: ssl.SSLContext=None,\n loglevel: int=logging.DEBUG, buffer_size: int=asyncio.streams._DEFAULT_LIMIT):\n self._event_loop = event_loop\n self._server = None\n if not ssl_context:\n # This looks very similar to the code for create_default_context\n # That's because it is the code\n # For some reason, create_default_context doesn't like me and won't work properly\n self._ssl = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23)\n # SSLv2 considered harmful.\n self._ssl.options |= ssl.OP_NO_SSLv2\n\n # SSLv3 has problematic security and is only required for really old\n # clients such as IE6 on Windows XP\n self._ssl.options |= ssl.OP_NO_SSLv3\n self._ssl.load_default_certs(ssl.Purpose.SERVER_AUTH)\n self._ssl.options |= getattr(_ssl, \"OP_NO_COMPRESSION\", 0)\n self._ssl.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)\n self._ssl.options |= getattr(_ssl, \"OP_CIPHER_SERVER_PREFERENCE\", 0)\n\n else:\n self._ssl = ssl_context\n\n self._bufsize = buffer_size\n self.default_butterfly = Butterfly\n self.default_net = Net\n\n self._executor = futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2 + 1)\n\n self.net = None\n self.log_level = loglevel\n self.logger = logging.getLogger(\"ButterflyNet\")\n self.logger.setLevel(loglevel)\n if self.logger.level <= logging.DEBUG:\n self._event_loop.set_debug(True)\n\n self.butterflies = {}",
"def make_context(self, engine, args):\n args = self.normalize_args(args)\n _, ctx = self._make_argkey_and_context(engine, args)\n return ctx"
] |
[
"0.7945872",
"0.7542444",
"0.74426365",
"0.7328456",
"0.7317657",
"0.7291481",
"0.7074744",
"0.7014252",
"0.69510484",
"0.6910048",
"0.67939",
"0.67733735",
"0.67338645",
"0.6712221",
"0.6710472",
"0.67098236",
"0.66667795",
"0.66622305",
"0.65952104",
"0.65731806",
"0.6565364",
"0.65289384",
"0.6491938",
"0.64688706",
"0.6437619",
"0.6422528",
"0.6390579",
"0.6380925",
"0.631525",
"0.62289715",
"0.6159143",
"0.6111438",
"0.6033291",
"0.6001711",
"0.59843254",
"0.5980465",
"0.5950483",
"0.5919072",
"0.5915692",
"0.5911779",
"0.5907547",
"0.587181",
"0.5831087",
"0.5801068",
"0.5798362",
"0.5787064",
"0.57815564",
"0.5781511",
"0.5770555",
"0.5760738",
"0.5732866",
"0.56884485",
"0.56688476",
"0.56644315",
"0.56564695",
"0.56286854",
"0.5606686",
"0.55888236",
"0.55810636",
"0.55796355",
"0.55657184",
"0.5554176",
"0.5548595",
"0.55285215",
"0.5524754",
"0.55239177",
"0.5468348",
"0.54614806",
"0.5445857",
"0.5438224",
"0.5418137",
"0.54158187",
"0.5411097",
"0.53872347",
"0.5370856",
"0.53623366",
"0.535491",
"0.5350062",
"0.5349081",
"0.5327335",
"0.5321371",
"0.53209144",
"0.53081745",
"0.5292339",
"0.52843153",
"0.5283043",
"0.52820253",
"0.5280972",
"0.5280015",
"0.5270417",
"0.5269601",
"0.5264763",
"0.525186",
"0.52485204",
"0.5241891",
"0.5227141",
"0.5220509",
"0.5214785",
"0.5211134",
"0.5204382"
] |
0.65869004
|
19
|
(str) > bool Return True if correct service name
|
def is_service_name_correct(self, service):
return service in self.services
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def check_name(name, allow_services=False):",
"def istype(client, service_name: str):\n\n if is_client(client):\n return (\n client.meta.service_model.service_name.lower()\n == service_name.strip().lower()\n )\n return False",
"def definition_of_services(self):\r\n return True",
"def isService(self, serviceInterface: java.lang.Class) -> bool:\n ...",
"def _interesting_service(self, service: UpnpService) -> bool:\n service_type = service.service_type\n for service_types in self._SERVICE_TYPES.values():\n if service_type in service_types:\n return True\n\n return False",
"def compServiceCheck():\n # global compileService\n return compileService.state.name",
"def start_services(self, service_name):\r\n\r\n from Services import Services\r\n result = Services.start_service(service_name, self.machine_name)\r\n if result is None:\r\n return None\r\n elif result:\r\n return True\r\n else:\r\n return False",
"def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_name\")",
"def is_service_installed(klass, service):\n return True",
"def test_get_service_string(self):\n pass",
"def term(name):\n cmd = \"svc -t {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)",
"def getServiceName(self) -> str:\n ...",
"def is_service_endpoint(path):\n return re.match(r'^[a-zA-Z0-9.-]+:\\d+$', path)",
"def future_supported_service(service_name):\n print('Service {} linked.'.format(service_name))\n pass",
"def service_name(self):\n return self._service_name",
"def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass",
"def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> str:\n return pulumi.get(self, \"service_name\")",
"def name_registered(name, state):\n for client in state['clients']:\n if client['name'] == name:\n return True\n return False",
"def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"service_name\")",
"def print_service_available():\n if WithingsDataManager.service_available is not True:\n _LOGGER.info(\"Looks like the service is available again\")\n WithingsDataManager.service_available = True\n return True",
"def service(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service\")",
"def IsServiceRunning(self, service_name):\n if self.HasSystemd():\n # Querying for the pid of the service will return 'MainPID=0' if\n # the service is not running.\n stdout, stderr = self.RunCmdOnDevice(\n ['systemctl', 'show', '-p', 'MainPID', service_name], quiet=True)\n running = int(stdout.split('=')[1]) != 0\n else:\n stdout, stderr = self.RunCmdOnDevice(['status', service_name], quiet=True)\n running = 'running, process' in stdout\n assert stderr == '', stderr\n logging.debug(\"IsServiceRunning(%s)->%s\" % (service_name, running))\n return running",
"def wsdl_service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"wsdl_service_name\")",
"def service_type_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_type_name\")",
"def IsServiceRunning(self, service_name):\n stdout, stderr = 'stdout', 'system does not have systemd'\n if self.HasSystemd():\n # Querying for the pid of the service will return 'MainPID=0' if\n # the service is not running.\n stdout, stderr = self.RunCmdOnDevice(\n ['systemctl', 'show', '-p', 'MainPID', service_name], quiet=True)\n running = int(stdout.split('=')[1]) != 0\n assert stderr == '', stderr\n logging.debug(\"IsServiceRunning(%s)->%s\" % (service_name, running))\n return running",
"def apiName(self, name):\n return self.genOpts.conventions.is_api_name(name)",
"def _get_service_type(service):\n\n return service.split(':')[3]",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def service_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_name\")",
"def start(name):\n __salt__[\"file.remove\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -u {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)",
"def enabled(name, **kwargs):\n if not available(name):\n log.error(\"Service %s not found\", name)\n return False\n\n run_file = os.path.join(SERVICE_DIR, name, \"run\")\n down_file = os.path.join(SERVICE_DIR, name, \"down\")\n\n return (\n os.path.isfile(run_file)\n and os.access(run_file, os.X_OK)\n and not os.path.isfile(down_file)\n )",
"def use_service_networking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_service_networking\")",
"def test_service(host):\n assert host.service('php7.0-fpm').is_enabled\n assert host.service('php7.0-fpm').is_running\n assert host.service('php7.2-fpm').is_enabled\n assert host.service('php7.2-fpm').is_running",
"def isNameUsed(self, name: unicode, startId: long, stopId: long) -> bool:\n ...",
"def is_in_service(self) -> bool:\n return self._enabled",
"def SERVICE_NAME(default=None):\n return ParamStore.get('SERVICE_NAME', default=default, store=ParamStore.Stores.OS).value",
"def get_name(self, service):\n\n name = flask.request.form.get(\"name\")\n if name:\n if service.name != name:\n service_with_name = get_one(self._db, ExternalIntegration, name=name)\n if service_with_name:\n return INTEGRATION_NAME_ALREADY_IN_USE\n return name",
"def endpoint_checker(url):\r\n if \"/arcgis/rest/services/\" and \"http\" in url:\r\n return True\r\n return False",
"def capitalize_tag_kv(service: str) -> bool:\n return service in (\"ec2\", \"iam\", \"ssm\")",
"def test_custom_service_name():\n cosmos_pm = packagemanager.PackageManager(cosmos.get_cosmos_url())\n cosmos_pm.get_package_version('marathon', None)\n options = {\n 'service': {'name': \"test-marathon\"}\n }\n install_package('marathon', options_json=options)\n deployment_wait(service_id=options[\"service\"][\"name\"], max_attempts=300)\n\n shakedown.dcos.service.wait_for_service_endpoint('test-marathon', timeout_sec=300, path=\"ping\")",
"def hasname(self):\n\t\treturn self.name is not None",
"def _filter_service(self, service):\n\t\tservice = service.lower()\n\t\tservice = service.replace(\"soap\",\"http\").replace(\"https\",\"http\").replace(\"ssl\",\"http\").replace(\"http-proxy\",\"http\").replace(\"http-alt\",\"http\").replace(\"ajp13\",\"http\").replace(\"vnc-http\",\"http\").replace(\"http-mgmt\",\"http\").replace(\"x509\",\"http\").replace('iiimsf','http')\n\t\tservice = service.replace(\"microsoft-ds\",\"netbios-ssn\")\n\t\tservice = service.replace(\"imaps\",\"imap\").replace(\"pop3s\",\"pop3\").replace(\"smtps\",\"smtp\").replace(\"pop3pw\",\"pop3\")\n\t\tservice = service.replace(\"psql\",\"postgresql\")\n\n\t\treturn service",
"def service_type_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"service_type_name\")",
"def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False",
"def has_name(self):\n return self.unpack_word(0x2) != 0",
"def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True",
"def get_service_type_for_service_name(self, service_name):\n from ranger_performance_tool import perf_globals\n service_type_mapping = perf_globals.CONFIG_READER.get_config_value(\"secondary\", \"service_type_mapping\")\n if service_name not in service_type_mapping.keys():\n raise Exception(f\"Unknown service name:{service_name}.\"\n f\"Add it to service_type_mapping in secondary config file\")\n return service_type_mapping[service_name]",
"def get_service_name(service, rem):\n flavor = rem.os.package_type\n try:\n return _SERVICE_MAP[service][flavor]\n except KeyError:\n return None",
"def service(self, service):\n allowed_values = [\"data-actions\", \"smtp\"]\n if service.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for service -> \" + service)\n self._service = \"outdated_sdk_version\"\n else:\n self._service = service",
"def _get_service(self, service_name):\n if self._service:\n return self._service\n res = self._cc.services().get_by_name(service_name, name='label')\n self._service = res.resource\n return self._service",
"def has_name(self, name: str):\n name_bytes = str_to_bytes_pad(name, MAX_NAME_LEN)\n res = self.dev.apdu_exchange(0x0e, name_bytes)\n assert len(res) == 1\n assert res[0] in (0, 1)\n return bool(res[0])",
"def test_ifServicesAreRunning():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"service\" in testConfig.config:\n print \"Service: \"+ testConfig.config[\"name\"]\n if sys.platform.startswith(\"darwin\"):\n yield assertionFunctions.checkIfServiceIsRunning_OSX, testConfig.config\n elif sys.platform.startswith(\"linux\"):\n yield assertionFunctions.checkIfServiceIsRunning_Linux, testConfig.config\n else:\n assert False, str(sys.platform)+\": Not supported!\"",
"def is_event_service_task(jeditaskid):\n eventservice = False\n\n query = {'jeditaskid': jeditaskid}\n task = list(JediTasks.objects.filter(**query).values('eventservice'))\n if len(task) > 0 and 'eventservice' in task[0] and task[0]['eventservice'] is not None and task[0]['eventservice'] == 1:\n eventservice = True\n\n return eventservice",
"def valid_routine_name(routine):\n\treturn re.match('^[a-z_]([a-z0-9_]*)', routine) is not None",
"def find_service(iface, context, name):",
"def service_dns_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_dns_name\")",
"def get_full_service_name(service, s):\n service_alias = SERVICE_MAPPING[service] if service in SERVICE_MAPPING else service\n\n # Don't suffix service type if it's a web worker or the same as the service (e.g. gateway, haproxy)\n if s == 'web' or service_alias == s:\n return service_alias\n return '{}-{}'.format(service_alias, s)",
"def has_name(self):\n return self.name is not None",
"def checkIfServiceIsRunning_Linux(testConfig):\n assert \"name\" in testConfig\n assert \"service\" in testConfig\n \n command = \"sudo service \"+testConfig[\"service\"]+\" status\"\n p = subprocess.Popen(command.split(\" \"), stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n out,error = p.communicate()\n \n \n assert re.search(\"is running\",out) ,\"\\nName :\"+testConfig[\"service\"]+\\\n \"\\nExpected service to be running: \"+testConfig[\"service\"]+\\\n \"\\n Test failed.\"",
"def verify_dbus_service(my_interface):\n try:\n interface = get_dbus_interface('org.freedesktop.DBus',\n '/org/freedesktop/DBus')\n return my_interface in interface.ListNames()\n except dbus.DBusException:\n return False",
"def service_present(\n name, service_type, description=None, profile=None, **connection_args\n):\n ret = {\n \"name\": name,\n \"changes\": {},\n \"result\": True,\n \"comment\": 'Service \"{}\" already exists'.format(name),\n }\n\n # Check if service is already present\n role = __salt__[\"keystone.service_get\"](\n name=name, profile=profile, **connection_args\n )\n\n if \"Error\" not in role:\n return ret\n else:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Service \"{}\" will be added'.format(name)\n return ret\n # Create service\n __salt__[\"keystone.service_create\"](\n name, service_type, description, profile=profile, **connection_args\n )\n ret[\"comment\"] = 'Service \"{}\" has been added'.format(name)\n ret[\"changes\"][\"Service\"] = \"Created\"\n\n return ret",
"def test_with_service_name(self):\n exc = RemoteIntegrationException(\n u\"Unreliable Service\", \n u\"I just can't handle your request right now.\"\n )\n\n # Since only the service name is provided, there are no details to\n # elide in the non-debug version of a problem detail document.\n debug_detail = exc.document_detail(debug=True)\n other_detail = exc.document_detail(debug=False)\n eq_(debug_detail, other_detail)\n\n eq_(u'The server tried to access Unreliable Service but the third-party service experienced an error.',\n debug_detail\n )",
"def test_unknown_service(self):\n raise NotImplementedError # FIXME",
"def private_link_service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_link_service_name\")",
"def private_link_service_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"private_link_service_name\")",
"def _check_name(self):\n\t\tpass",
"def has_app_name(self, name):\n return name in self._proxies.keys()",
"def check_name(self, name: str):\n if name[0] == \"/\" or self.check_end_streaming(name):\n return True\n else:\n return False",
"def get_service_name(wrapped, instance, args, kwargs):\n if \"serviceAbbreviation\" not in instance._service_model.metadata:\n return instance._service_model.metadata[\"endpointPrefix\"]\n return instance._service_model.metadata[\"serviceAbbreviation\"]",
"def is_deprovisioned(self, name):\n self._assert_space()\n\n try:\n return not self._get_service_instance(name, no_cache=True)\n except Exception as e:\n print(str(e))\n return True",
"def get_true_name(self, name):\n if (name in self.dll_func_true_names):\n return self.dll_func_true_names[name]\n return None",
"def isSetName(self):\n return _libsbml.Port_isSetName(self)",
"def screename(service=''):\r\n\r\n def _make_name(a, b):\r\n return ''.join(_random.sample(string.ascii_letters,\r\n _random.choice(range(a, b))))\r\n\r\n if service in ('', 'aim', 'aol'):\r\n name = _make_name(3, 16)\r\n if service == 'aol':\r\n return name + '@aol.com'\r\n else:\r\n return name\r\n elif service is 'skype':\r\n name = _make_name(6, 32)\r\n return name\r\n elif service is 'google':\r\n name = _make_name(1, 19)\r\n return name + '@google.com'\r\n else:\r\n name = _make_name(8, 20)\r\n return name",
"def get_service(self,name):\n\t\t#forma segura de obtener un servicio\n\t\ttry:\n\t\t\treturn self._services[name]\n\t\texcept Exception:\n\t\t\treturn None",
"def is_running(service):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'),\n warn_only=True):\n if using_systemd():\n return systemd.is_running(service)\n else:\n if distrib_family() != \"gentoo\":\n test_upstart = run_as_root('test -f /etc/init/%s.conf' %\n service)\n status = _service(service, 'status')\n if test_upstart.succeeded:\n return 'running' in status\n else:\n return status.succeeded\n else:\n # gentoo\n status = _service(service, 'status')\n return ' started' in status",
"def checkIfServiceIsRunning_OSX(testConfig):\n assert \"name\" in testConfig\n assert \"service\" in testConfig\n \n command = \"launchctl list | grep \"+testConfig[\"service\"] \n p = subprocess.Popen(command.split(\" \"), stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n out,error = p.communicate()\n \n \n testPass=False\n for output in out:\n if re.match(\"\\d+\",output) :\n testPass=True\n break\n \n assert testPass,\"\\nName :\"+testConfig[\"service\"]+\\\n \"\\nExpected service to be running: \"+testConfig[\"service\"]+\\\n \"\\n Test failed.\"",
"def service(self) -> str:\n return pulumi.get(self, \"service\")",
"def is_dev_name_valid(self):\n return self._name_re.match(self.dev_name) is not None",
"def __bool__(self):\n return True if self._name is not None else False",
"def _remember_service_name(self, event):\n service_name = event[\"arguments\"][\"service_name\"]\n # We've added logging of the service_handle to the API signature in\n # the Monitor, but for backwards compatibility we'll keep it as\n # follows for now.\n service_handle = \"0x%08x\" % event[\"return_value\"]\n self.services[service_handle] = service_name",
"def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False",
"def check_ser_presence(service, user_pass_list):\r\n\r\n for pass_info in user_pass_list:\r\n if pass_info[1] == service:\r\n return False\r\n\r\n return True",
"def _validate_name(name):\r\n\tif HOST_NAME != name and len(name) > 0 and ZOOM_PHRASES[0] not in name and name not in WAITING_ROOM:\r\n\t\treturn True\r\n\treturn False",
"def is_valid(name):\n return bool(name)",
"def blob_services_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"blob_services_name\")",
"def get_from_service(self, service_name: str, key: str) -> Any:\n service_list = [\n self.data[\"services\"][s] for s in self.services if service_name.lower() == s\n ]\n if service_list:\n service = service_list[0]\n return service.get(key, None)",
"def stop(name):\n __salt__[\"file.touch\"](\"{}/down\".format(_service_path(name)))\n cmd = \"svc -d {}\".format(_service_path(name))\n return not __salt__[\"cmd.retcode\"](cmd, python_shell=False)",
"def provider(provider):\n if provider in (\"alditalk\", \"netzclub\", \"congstar\"):\n return True\n else:\n return False",
"def sd(self, service='show'):\n if service == 'show':\n return self.run_command('sd')\n else:\n command = 'sd ' + str(service)\n output = self.run_command(command)[0]\n if 'enabled.' in output:\n return True\n elif 'disabled.' in output:\n return False\n else:\n raise ParseError(\"Could not parse the output of sd.\")",
"def service(self):\n if self.access_url:\n for key, value in {\n \"ivo://ivoa.net/std/conesearch\": scs.SCSService,\n \"ivo://ivoa.net/std/sia\": sia.SIAService,\n \"ivo://ivoa.net/std/ssa\": ssa.SSAService,\n \"ivo://ivoa.net/std/sla\": sla.SLAService,\n \"ivo://ivoa.net/std/tap\": tap.TAPService,\n }.items():\n if key in self.standard_id:\n self._service = value(self.access_url)\n\n return self._service",
"def is_system(self) -> bool:",
"def checkService(self, serviceName, options):\n url = self._getURL(serviceName, options)\n self.log.info(\"Pinging service\", url)\n pingRes = Client().ping(url=url)\n if not pingRes['OK']:\n self.log.info('Failure pinging service: %s: %s' % (url, pingRes['Message']))\n res = self.restartInstance(int(options['PID']), serviceName, self.restartServices)\n if not res[\"OK\"]:\n return res\n elif res['OK'] and res['Value'] != NO_RESTART:\n self.accounting[serviceName][\"Treatment\"] = \"Successfully Restarted\"\n self.log.info(\"Agent %s has been successfully restarted\" % serviceName)\n self.log.info(\"Service responded OK\")\n return S_OK()",
"def get_service(self):",
"def getService(name):\n return Service.getService(name)"
] |
[
"0.74874175",
"0.7102973",
"0.6658793",
"0.65929854",
"0.6531604",
"0.6523252",
"0.64808494",
"0.64768475",
"0.64768475",
"0.64768475",
"0.6421063",
"0.6411934",
"0.63543",
"0.63014257",
"0.62521374",
"0.6246466",
"0.61115396",
"0.60908616",
"0.60830384",
"0.60830384",
"0.6069704",
"0.6036148",
"0.6036148",
"0.6036148",
"0.60160005",
"0.6010067",
"0.5999235",
"0.59940726",
"0.59873366",
"0.59838617",
"0.5955621",
"0.5951026",
"0.5930281",
"0.5930281",
"0.5930281",
"0.5922099",
"0.59061205",
"0.58827454",
"0.58786184",
"0.58451337",
"0.58328086",
"0.5832074",
"0.58158886",
"0.5808831",
"0.58054",
"0.580064",
"0.579765",
"0.5787438",
"0.5781869",
"0.57739407",
"0.5762399",
"0.572165",
"0.5696995",
"0.5695487",
"0.5686878",
"0.56760174",
"0.5670189",
"0.56677496",
"0.56641644",
"0.5661959",
"0.5657899",
"0.56387484",
"0.56292593",
"0.5625871",
"0.56148666",
"0.5600807",
"0.5587946",
"0.55863875",
"0.5585403",
"0.5582314",
"0.5582314",
"0.5578747",
"0.55784017",
"0.556733",
"0.55586386",
"0.5551221",
"0.5548613",
"0.5540684",
"0.55384773",
"0.55379057",
"0.5532129",
"0.5530469",
"0.55199975",
"0.54965496",
"0.54949486",
"0.5494175",
"0.5493598",
"0.548708",
"0.5475156",
"0.5465353",
"0.5462172",
"0.5449964",
"0.54485303",
"0.54399204",
"0.5438239",
"0.54374975",
"0.5431567",
"0.5428785",
"0.5427431",
"0.54228055"
] |
0.83579403
|
0
|
(str) > bool Return True if correct class mail type
|
def is_class_mail_types_correct(self, class_mail_type):
return class_mail_type in self.class_mail_types
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_mail_types_correct(self, mail_type):\r\n return mail_type in self.mail_types",
"def email_type(verifield, required):\n return verifield is None or parseaddr(verifield) != ('','')",
"def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return t",
"def get_receive_mail_str(self):\n ret = False\n if self.__mail:\n ret = True\n return ret",
"def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False",
"def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True",
"def check_type(self):\n return True",
"def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False",
"def is_my_type(type_str):\n raise NotImplementedError()",
"def verify_mail(self):\n raise NotImplementedError",
"def test(types, _):\n return 'Date' in types and 'Postal Code' in types",
"def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False",
"def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)",
"def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)",
"def is_valid_content_type(cls, content_type: str) -> bool:\n return content_type in cls.CONTENT_TYPES.value",
"def is_type(self, ent_type):\n # type: (str) -> bool\n # its always an entity ...\n if ent_type.lower() in ('entity', self.settings['_type'].lower()):\n return True\n else:\n return False",
"def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )",
"def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode",
"def __bool__(self):\n return _libsbml.string___bool__(self)",
"def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False",
"def isValidType(self, fsn, fsn_type):\n if ((OINKM.checkIfFSN(fsn)) and (\"SEO\" in fsn_type)) or (not(OINKM.checkIfFSN(fsn)) and (\"SEO\" not in fsn_type)):\n #If the value in the fsn field is an FSN and the description type is an SEO type, then it could be invalid.\n #If the value in the fsn field is not an FSN and the description type is not an SEO type, then it could be invalid.\n if \"SEO\" in fsn_type:\n question = \"You seem to be writing an FSN article but the description type appears to be an SEO. Are you sure you want to submit that?\"\n else:\n question = \"You seem to be writing about something that's not an FSN. Are you sure you want to submit that?\"\n change_type = QtGui.QMessageBox.question(\n self,\n \"Possible Description Type Mismatch\",\n question,\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, \n QtGui.QMessageBox.No\n )\n if change_type is not None:\n if change_type == QtGui.QMessageBox.Yes:\n is_valid = True\n else:\n is_valid = False\n else:\n is_valid = False\n else:\n #If the value in the FSN field is an FSN and the type is not an SEO type.\n #if the value is not an FSN and the type is one of the SEO types.\n is_valid = True\n return is_valid",
"def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')",
"def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False",
"def streettype(self):\n if self.index >= self.length:\n return False\n \n self._typ, n = self.parse_streettype()\n if self._typ is not None:\n self.idx_typ = self.index\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == '.':\n self.index += 1\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"TYP\", self._typ, self.idx_typ)\n self.isaddr = True\n return True\n return False",
"def is_text(self):\n return self.value_type in (str, unicode)",
"def has_classname(self):\n return self.unpack_word(0x4A) > 0",
"def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types",
"def convert(self,message):\n \n content_type = message.get('content',{}).get('@type','')\n if content_type in self.supported:\n result = getattr(self.tconv, content_type)(message)\n else:\n return False\n \n return result",
"def stringable(self):\n return True",
"def is_valid_license_type(self):\n clean = self.license_type.lower().replace('-', ' ')\n return clean not in INVALID_LICENSE_TYPE",
"def is_supported_type(self) -> bool:\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS",
"def is_reaction_class(rxn_class):\n return rxn_class in _values(ReactionClass.Typ)",
"def _validate_content_type(\n content_type: str, content_name: str, performative: str\n) -> Tuple[bool, str]:\n if not _is_valid_content_type_format(content_type):\n return (\n False,\n \"Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Type of content '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )",
"def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)",
"def is_text( self ):\n return self.get_main_type() == 'text'",
"def check_proc_type(image_proc_type):\n\n if isinstance(image_proc_type, str):\n return True\n else:\n logging.warning('Image processing type is not a string')\n print('Please choose only one processing technique.')\n raise ValueError('Please choose only one processing technique.')",
"def isinstance(self, class_or_string):\n if class_or_string is None:\n return False\n import inspect\n if inspect.isclass(class_or_string):\n return isinstance(self, class_or_string)\n else:\n return self.__class__.__name__.lower() == class_or_string.lower()",
"def _type_validator(self, type=None):\n if type not in ['agents', 'users', 'groups']:\n type = 'users'\n return type",
"def validate(self,value):\r\n return type(value) is self.datatype",
"def do_type(self, str_arg):\n try:\n self.adbc.type(validateString(str_arg))\n except Exception, e:\n printLog(self.threadName + 'TYPE FAILED: %s' % e.message)\n self.resultFlag = False\n finally:\n return self.resultFlag",
"def canAdapt(self, stru):\n return isinstance(stru, crystal)",
"def is_string(self):\n answer = self._call('is_string')\n return answer.yes",
"def typeValidator(self, food_type):\n if type(food_type) != str:\n API.abort(400, error_messages[16]['int_type'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", food_type) or food_type.isspace():\n API.abort(\n 400, error_messages[17]['wrong_format_ty'])\n\n return True",
"def is_valid_case_type(case_type):\n return bool(_case_type_regex.match(case_type or ''))",
"def is_my_case(self, type_):\n return (\n isinstance(self.__apply_sequence(type_), self.declaration_class)\n )",
"def is_valid_type(type):\n return type in type_to_adapter",
"def ISTEXT(value):\n return isinstance(value, (basestring, AltText))",
"def is_bool (self, phrase):\r\n \r\n return isinstance(phrase,bool)",
"def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES",
"def __detect_type__(self, value):\n def is_ipv6_address(value):\n try:\n value, interface = value.split('%', 1)\n except: # noqa\n pass\n try:\n parts = value.split(':')\n for part in parts:\n if part == '':\n continue\n part = int(part, 16)\n if part < 0:\n raise ValueError\n return True\n except Exception:\n return False\n\n def is_ipv4_address(value):\n try:\n value, interface = value.split('%', 1)\n except: # noqa\n pass\n try:\n parts = value.split('.', 3)\n for part in parts:\n part = int(part)\n if part < 0 or part > 255:\n raise ValueError\n return True\n except: # noqa\n return False\n\n # Strip port\n if value.startswith('['):\n value = value[1:]\n try:\n value, port = value.split(':', 1)\n except: # noqa\n pass\n\n if value.endswith(']'):\n value = value[:-1]\n\n if is_ipv4_address(value):\n return 1, value, 'ipv4_address'\n\n elif is_ipv6_address(value):\n return 2, value, 'ipv6_address'\n\n else:\n return 0, value, 'hostname'",
"def is_valid(self, value) -> 'True|str':\n if self.base_type is not None and not isinstance(value, self.base_type):\n return f'Value {value} is not type of {self.base_type}.'\n return True",
"def has_acceptable_type(self, value):\n if not value:\n return False\n if super().has_acceptable_type(value):\n return True\n # Hmmm ok maybe we're running under IPython:\n try:\n import IPython\n return isinstance(value, IPython.kernel.zmq.iostream.OutStream)\n except ImportError:\n return False",
"def CheckType(self, *args, **kwargs):\n pass",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def _validate_type(self, key, type_):\n if type_ is None:\n type_ = \"\"\n \n if not isinstance(type_, (str, unicode)):\n raise TypeError(\"FileLink.type should be a str or unicode, \"\n \"not %s\" % type_.__class__.__name__)\n \n return type_",
"def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0",
"def _is_name_type(self, type_id):\n return type_id == self.name_type",
"def get_check_types():",
"def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )",
"def is_enabled_type(self):\r\n registry = queryUtility(IRegistry) \r\n if registry is None: \r\n # Don't show if the registry is not found\r\n return False\r\n settings = registry.forInterface(IIPnextViewletBlogSettings, \r\n check=False) \r\n _types = getattr(settings, 'allowed_types', '')\r\n this_type = self.context.Type()\r\n \r\n return this_type in _types",
"def get_type(self):\n types = dict(ADDRESS_TYPE_CHOICES)\n return types.get(self.address_type, \"N/A\")",
"def subject_type(cls):\n pass",
"def _check_mimetype(self):\n if self.mimetype in Config.aliases:\n mimetype = Config.aliases[self.mimetype]\n else:\n mimetype = self.mimetype\n expected_extensions = mimetypes.guess_all_extensions(mimetype,\n strict=False)\n if expected_extensions:\n if self.has_extension and self.extension not in expected_extensions:\n # LOG: improve this string\n self.make_dangerous('expected extensions')",
"def CONTENT_TYPE(self):",
"def IsType(self, rule_type_name):\n return rule_type_name == 'log_url'",
"def cs_classes(post):\n return bool(re.search(r\"\\s+\\[cs|CS]\\d+[a-cA-C]\\s+\", post))",
"def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return False",
"def is_voicemail(self):\n return self._is_voicemail",
"def validatePhoneNumberType(self):\n ## Declaring a Flag to control a while loop\n phone_number_type_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_type_ok:\n if self.phone_number_type.lower() in self.valid_phone_number_types:\n phone_number_type_ok = True\n return True\n\n else:\n return False",
"def is_type(self, typ):\n return typ == self.__class__.__name__",
"def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"",
"def get_email_content_from_type(self, type):\n if type is 1:\n return strings.CIRCUIT_CREATED_EMAIL_NOTIFICATION\n # case circuit is favorited\n if type is 2:\n # parse the route from info textfield\n route = self.info[self.info.find('circuit'):]\n route = route[:route.find('\\n')]\n route = route[route.find(':')+2:]\n try:\n circuit = Circuit.objects.get(pk=route)\n circuit_name = circuit.name\n except Circuit.DoesNotExist:\n circuit_name = 'Route :)'\n # parse the user from info textfield\n us = self.info[self.info.find('user')+7:]\n us = us[:us.find(',')]\n try:\n user = User.objects.get(pk=us)\n user = user.get_full_name()\n except User.DoesNotExist:\n user = 'Some Worldrat user :)'\n return strings.CIRCUIT_FAVORITED_EMAIL_NOTIFICATION % {\n 'route':circuit_name, 'user': user,\n }\n\n if type is 3:\n # parse the route from info textfield\n route = self.info[self.info.find('original_circuit')+19:]\n route = route[:route.find(',')]\n try:\n circuit = Circuit.objects.get(pk=route)\n circuit_name = circuit.name\n except Circuit.DoesNotExist:\n circuit_name = 'Route :)'\n # parse the user from info textfield\n us = self.info[self.info.find('user')+7:]\n us = us[:us.find(',')]\n try:\n user = User.objects.get(pk=us)\n user = user.get_full_name()\n except User.DoesNotExist:\n user = 'Some Worldrat user :)'\n return strings.CIRCUIT_REMIXED_EMAIL_NOTIFICATION % {\n 'route':circuit_name, 'user': user, \n }\n if type is 4:\n return strings.CIRCUIT_UPDATED_EMAIL_NOTIFICATION\n if type is 5:\n return strings.USER_FOLLOWED_EMAIL_NOTIFICATION\n if type is 6:\n return strings.CONTENT_SHARED_EMAIL_NOTIFICATION",
"def is_valid_type(self, attr: Optional[str] = None) -> bool:\n try:\n self.validate_type(attr)\n except TypeError:\n return False\n return True",
"def check_field_type(field_class):\n if field_class == 'TextField':\n field_type = 'Text field'\n elif field_class == 'NumericField':\n field_type = 'Numeric field'\n elif field_class == 'DateField':\n field_type = 'Date field'\n elif field_class == 'DateTimeField':\n field_type = 'Date & time field'\n elif field_class == 'TimeField':\n field_type = 'Time field'\n elif field_class == 'LookupField':\n field_type = 'Select box field'\n elif field_class == 'MultipleLookupField':\n field_type = 'Multiple select field'\n\n return field_type",
"def test_match_types(self):\n f = lws.match_types\n # assert f(str, u'test') is True\n assert f(str, 'test') is True\n assert f(int, 123) is True\n assert f(int, 123.00) is False\n assert f(bool, [1, 2, 3]) is False",
"def validate_typeID(self, type_ID):\n if type(type_ID) == str:\n for letter in type_ID:\n if not letter.isalpha() and not letter.isdigit():\n return False\n return True\n return False",
"def type_eq(cdm_column_type, submission_column_type):\n if cdm_column_type == 'time':\n return submission_column_type == 'character varying'\n if cdm_column_type == 'integer':\n return submission_column_type == 'int'\n if cdm_column_type in ['character varying', 'text', 'string']:\n return submission_column_type in ('str', 'unicode', 'object')\n if cdm_column_type == 'date':\n return submission_column_type in ['str', 'unicode', 'datetime64[ns]']\n if cdm_column_type == 'timestamp':\n return submission_column_type in ['str', 'unicode', 'datetime64[ns]']\n if cdm_column_type in ['numeric', 'float']:\n return submission_column_type == 'float'\n else:\n print(submission_column_type)\n raise Exception('Unsupported CDM column type ' + cdm_column_type)",
"def _valid_input_type(self, input_type):\n # pylint: disable=W0613, R0201\n return True",
"def is_stringified(self) -> bool:\n return self._stringify",
"def is_special_message(self):\n if not self.is_valid():\n return False\n \n # TODO: what if the author is wrong? then these don't match at all!\n for nickname in AUTHOR_TO_NICKNAME[self.author]:\n \n if self.content == f\"{nickname} changed the chat theme.\":\n return True\n \n if self.content == f\"{nickname} joined the video chat.\":\n return True\n \n if self.content == f\"{nickname} joined the call.\":\n return True\n \n if self.content.startswith(f\"{nickname} named the group\"):\n return True\n \n if self.content == f\"{nickname} removed the group name.\":\n return True\n \n if self.content == f\"{nickname} sent a link.\":\n return True\n \n if self.content == f\"{nickname} sent an attachment.\":\n return True\n \n if self.content.startswith(f\"{nickname} set the emoji to\"):\n return True\n \n if self.content == f\"{nickname} changed the group photo.\":\n return True\n \n if is_add_remove_member(self.content, nickname):\n return True\n\n if is_set_nickname(self.content, nickname):\n return True\n \n if is_clear_nickname(self.content, nickname):\n return True\n \n if is_create_group(self.content, nickname):\n return True\n if self.content == f\"{nickname} started a video chat.\":\n return True\n \n if self.content == f\"{nickname} left the group.\":\n return True\n \n if is_poll_message(self.content, nickname):\n return True\n return False",
"def _match_entry_type_string(code_entry, string_list):\n entry_type = re.match(r\"<(AST.*):.*\", code_entry.get('type')).group(1)\n return bool(entry_type in string_list)",
"def is_string(value):\n return isinstance(value, (str, bytes))",
"def is_string(obj):\n return isinstance(obj, basestring)",
"def is_html(self):\r\n return self.__content_type == html_ctype",
"def is_cstring_type(self, objtype):\n return issubclass(objtype, self.CString) or issubclass(objtype, self.CWString)",
"def check_page_type(html) -> None:\n\ttemp_lst = html.findAll('div', {'class': '_1HBR'})\n\tif temp_lst:\n\t\tpage_type_dict['general'] = True\n\t\tpage_type_dict['detail'] = False\n\telse:\n\t\tpage_type_dict['general'] = False\n\t\tpage_type_dict['detail'] = True",
"def is_cls(self, file, i):\n\n # Save line to local variable\n line = file[i].strip()\n\n # If line starts with 'class' and ends with ':' return True, else False\n if line.startswith(\"class \") and line.endswith(\":\"):\n return True\n return False",
"async def can_send_modmail(self, user: discord.User):\n blocked = await self.config.user(user).get_raw(\"blocked\")\n type_waiting = await self.config.user(user).get_raw(\"type_holding\")\n if blocked:\n raise UserIsBlocked\n if type_waiting:\n raise WaitingForMessageType(\n \"Please choose type of message you wish to send\"\n )\n thread_open = await self.config.user(user).get_raw(\"thread_is_open\")\n current_thread = await self.config.user(user).get_raw(\"current_thread\")\n\n if thread_open:\n return True, current_thread\n else:\n return False, None",
"def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)",
"def verify(self):\n data = [\"rfc\", \"tel\", \"email\", \"name\", \"use\"]\n state = False\n for item in data:\n if getattr(self, item + \"Input\").text() != \"\":\n state = True\n else:\n return False\n return state",
"def __bool__(self):\r\n raise TypeError('cannot use secure type in Boolean expressions')",
"def isString(x):\n if type(x) == str:\n return True\n else:\n return False",
"def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False",
"def is_type(self, *seg_type: str) -> bool:\n return self.class_is_type(*seg_type)",
"def check_eclass(eclass):\n eclass = str(eclass)\n # Should be a valid eclass\n return eclass not in LIST_ECLASS",
"def _is_str_matching_builtin_type(str_value: str) -> bool:\n builtin_types = [\n getattr(builtins, d)\n for d in dir(builtins)\n if isinstance(getattr(builtins, d), type)\n ]\n return f\"<class '{str_value}'>\" in [str(bt) for bt in builtin_types]",
"def is_acceptable(self):",
"def process_MESSAGE_TYPE_EMG(self, raw):\n\n pass",
"def is_string(obj):\n return isinstance(obj, str)",
"def is_string(value):\n return isinstance(value, basestring)"
] |
[
"0.7468366",
"0.6942029",
"0.6129135",
"0.6121263",
"0.60874057",
"0.60748357",
"0.59902066",
"0.5907582",
"0.5859858",
"0.58163",
"0.5788119",
"0.57557553",
"0.5746743",
"0.57291144",
"0.5729054",
"0.5723128",
"0.5698093",
"0.56697905",
"0.56547284",
"0.55967015",
"0.5573413",
"0.5569571",
"0.55416805",
"0.55357385",
"0.5503957",
"0.5500609",
"0.54806626",
"0.54765123",
"0.5470845",
"0.54673046",
"0.54508483",
"0.5450301",
"0.54457873",
"0.54387206",
"0.5426025",
"0.5404676",
"0.54032564",
"0.5402732",
"0.53978544",
"0.5383267",
"0.5374865",
"0.537339",
"0.53507423",
"0.5350499",
"0.5350223",
"0.53487456",
"0.53389555",
"0.5332075",
"0.5328183",
"0.5300242",
"0.5298241",
"0.5294988",
"0.52901405",
"0.52881444",
"0.5276897",
"0.5271296",
"0.5259611",
"0.5258491",
"0.52551293",
"0.5250246",
"0.52499783",
"0.5247407",
"0.5243567",
"0.52354264",
"0.52350247",
"0.52344936",
"0.5227841",
"0.5221826",
"0.5211902",
"0.52020323",
"0.52018934",
"0.51997036",
"0.51946104",
"0.5194291",
"0.51941395",
"0.51925486",
"0.518527",
"0.51838195",
"0.5178926",
"0.5177009",
"0.5176597",
"0.51732767",
"0.5164288",
"0.51621276",
"0.5157451",
"0.5154035",
"0.515289",
"0.51443464",
"0.5143425",
"0.5137975",
"0.5137753",
"0.5134305",
"0.5124424",
"0.5115651",
"0.5107627",
"0.51060086",
"0.51047677",
"0.5101002",
"0.5097839",
"0.50957936"
] |
0.7834716
|
0
|
(str) > bool Return True if correct container type
|
def is_container_type_correct(self, container):
return container in self.containers
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_container(item):\n if isinstance(item, str):\n return False\n elif hasattr(item, \"__iter__\"):\n return True\n\n return False",
"def is_str_or_coll(value):\n return bool(is_str(value)) or bool(is_tuple_or_list(value))",
"def is_iterable_container(value):\n # strings are iterable too so we have to treat them as a special case\n return not isinstance(value, str) and isinstance(value, collections.Iterable)",
"def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False",
"def is_simple(self) -> bool:\n return self.data in ('int', 'bool', 'float', 'str')",
"def test_is_container(self):\n # verify ----------------------\n try:\n 1 in self.collection\n except TypeError:\n msg = \"'Collection' object is not container\"\n self.fail(msg)",
"def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False",
"def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string",
"def is_string(value):\n return isinstance(value, (str, bytes))",
"def check_type(self):\n return True",
"def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True",
"def is_container(value: object) -> TypeGuard[AnyContainer]:\n if isinstance(value, Container):\n return True\n if hasattr(value, \"__pt_container__\"):\n return is_container(cast(\"MagicContainer\", value).__pt_container__())\n return False",
"def isdatatype(object):\n return isinstance(object, (str, int, bool, float, type(None)))",
"def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False",
"def is_string(value):\n return isinstance(value, basestring)",
"def is_collection(var):\n return isinstance(var, Iterable) and not isinstance(var, str)",
"def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False",
"def has_acceptable_type(self, value):\n if not value:\n return False\n if super().has_acceptable_type(value):\n return True\n # Hmmm ok maybe we're running under IPython:\n try:\n import IPython\n return isinstance(value, IPython.kernel.zmq.iostream.OutStream)\n except ImportError:\n return False",
"def __contains__(self, a):\n try:\n self.convert(a)\n except CoercionFailed:\n return False\n\n return True",
"def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode",
"def isString(x):\n if type(x) == str:\n return True\n else:\n return False",
"def _is_non_string_iterable(value):\n if isinstance(value, str):\n return False\n if hasattr(value, '__iter__'):\n return True\n if isinstance(value, collections.abc.Sequence):\n return True\n return False",
"def _is_str(item):\n return isinstance(item, str)",
"def test_return_type(self):\n self.assertEqual(type(self.s0.from_json_string(self.string)), list)",
"def check_type(content):\n return (isinstance(content, Elem) or type(content) == Text or\n (type(content) == list and all([type(elem) == Text or\n isinstance(elem, Elem)\n for elem in content])))",
"def is_container(self, scopestr: str):\n return scopestr in self.con_scopestr_to_node",
"def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)",
"def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )",
"def is_string(document):\r\n return isinstance(document, str)",
"def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False",
"def _is_string(arg):\n return isinstance(arg, types.StringTypes)",
"def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False",
"def is_container(self):\n return (self.__type & NODE_TAG) and self.children",
"def is_sequence_of_str(items):\n return all(isinstance(item, basestring) for item in items)",
"def is_string(value):\n return isinstance(value, string_types)",
"def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))",
"def _isinstance(cls, x):\n return isinstance(x, cls.PYTHON_TYPE_CHECK)",
"def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)",
"def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )",
"def __bool__(self):\n return _uhd_swig.uhd_string_vector_t___bool__(self)",
"def _is_sequence(obj):\n return hasattr(obj, \"__iter__\") and not isinstance(obj, str)",
"def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)",
"def is_container(soln_stk, container_config):\n\n return (is_preconfigured(soln_stk, container_config) or\n is_generic(soln_stk, container_config))",
"def is_string(obj):\n return isinstance(obj, basestring)",
"def is_valid_type(type):\n return type in type_to_adapter",
"def is_typed_dict(self) -> bool:\n return True",
"def is_acceptable(self, value: Any) -> bool:\n if self == self.ARTIFACT_LIST:\n return typing_utils.is_homogeneous_artifact_list(value)\n elif self == self.ARTIFACT_MULTIMAP:\n return typing_utils.is_artifact_multimap(value)\n elif self == self.ARTIFACT_MULTIMAP_LIST:\n return typing_utils.is_list_of_artifact_multimap(value)\n raise NotImplementedError(f'Cannot check type for {self}.')",
"def is_my_type(type_str):\n raise NotImplementedError()",
"def of_type(self, a):\n return type(a) == type(self.one)",
"def is_categoric(series: List) -> bool:\n if series.dtype == str or series.dtype == np.object:\n try:\n if (\n int(re.split(r\"[^\\w\\s]\", series[0])[0]) >= 1900\n and len(re.split(r\"[^\\w\\s]\", series[0])) >= 3\n ):\n return False\n else:\n return True\n except:\n if (\n len(series.unique()) / len(series) == 1\n or \"id\" in series.name.lower()\n ):\n return False\n elif (\n True in series.unique().tolist()\n and False in series.unique().tolist()\n ):\n return False\n elif (\n \"True\" in series.unique().tolist()\n and \"False\" in series.unique().tolist()\n ):\n return False\n else:\n return True\n else:\n return False",
"def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()",
"def _check_datatype_to_string(prediction):\n if isinstance(prediction, str):\n return True\n raise TypeError('Prediction is not in string type.')",
"def _has_str_elems(obj):\n return all([isinstance(elem, str) for elem in obj])",
"def is_composite_type(self):\n return False",
"def _has_numeric_or_bool(self) -> bool:\n dtypes: Set[str] = set(self._data.keys())\n return 'i' in dtypes or 'f' in dtypes or 'b' in dtypes",
"def is_return_element(se: Any) -> bool:\n\n def _is_output_field(o):\n if isinstance(o, tuple):\n for i in o:\n if not _is_output_field(i):\n return False\n return True\n return inspect.isclass(o) and issubclass(o, BaseField)\n\n if isinstance(se, Sequence):\n if len(se) != 1:\n return False\n return _is_output_field(se[0])\n return _is_output_field(se)",
"def test_Container_Contains(self):\n test = \"key3\" in ee.Dictionary({\"key1\": 1, \"key2\": 2})\n self.assertIsInstance(test, bool)",
"def validate(self,value):\r\n return type(value) is self.datatype",
"def is_string(obj):\n return isinstance(obj, str)",
"def canAdapt(self, stru):\n return isinstance(stru, crystal)",
"def check_iterable(value):\n try:\n iter(value)\n if not isinstance(value, six.string_types):\n return True\n else:\n return False\n except Exception as e:\n pass\n\n return False",
"def _is_valid_set(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:set\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)",
"def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)",
"def is_non_string_iterable(obj: object) -> bool:\n return not (isinstance(obj, str) or isinstance(obj, bytes))\\\n and isinstance(obj, Iterable)",
"def _is_input_or_output_type(io: type, type_str: str):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False",
"def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' in typstr))",
"def applyStringTypes(self):\n ok = False\n try:\n for ii, atName in enumerate(self.getAttributeList()):\n _, isMandatory = self.__getAttributeInfo(atName)\n dataType = \"string\"\n for row in self.data:\n if row[ii] is None or row[ii] in [\".\", \"?\"]:\n row[ii] = \".\" if isMandatory else \"?\"\n else:\n row[ii] = self.__castD[dataType](row[ii])\n #\n self.__attributeTypeD[atName] = dataType\n ok = True\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n if self._raiseExceptions:\n raise e\n return ok",
"def test_type(self):\n state1 = State()\n self.assertEqual(type(state1.name), str)\n self.assertNotEqual(type(state1.name), list)",
"def type_valid(self):\n return contain_in_list_equal(self._type_or_ref, PARAM_RES_TYPES)",
"def is_text(self):\n return self.value_type in (str, unicode)",
"def _column_type(strings, has_invisible=True, numparse=True):\n types = [_type(s, has_invisible, numparse) for s in strings]\n return reduce(_more_generic, types, bool)",
"def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )",
"def is_list(self) -> bool:\n return False",
"def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True",
"def is_supported_type(self) -> bool:\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS",
"def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0",
"def check_type(self, data_input, debug_flag):\n _type = []\n \n if self.zigzag(data_input, debug_flag): _type.append(str(\"ZIGZAG\"))\n if self.flat(data_input, debug_flag): _type.append(str(\"FLAT\")) \n if self.r_flat(data_input, debug_flag): _type.append(str(\"R_FLAT\")) \n if self.e_flat(data_input, debug_flag): _type.append(str(\"E_FLAT\")) \n #print('_type:',_type)\n\n if _type:\n return True, _type\n else:\n return False, _type",
"def is_simple(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer',\n 'string')",
"def is_dict(self) -> bool:\n return True",
"def _is_encodable_simple(sv):\n return sv not in (None, str(), int())",
"def is_collection(obj):\n return type(obj) in COLLECTIONS_SET",
"def test_return_type(self):\n self.assertEqual(type(self.obj.to_json_string(self.valid_ld)), str)",
"def is_a(self, t):\n return isinstance(self._, t)",
"def is_choices(typ) -> bool:\n return type(typ) is type(Choices)",
"def _is_input_or_output_type(io: type, type_str: Literal[\"Input\", \"Output\", \"Meta\"]):\n if isinstance(io, type) and io.__module__.startswith(mldesigner_pkg):\n if type_str in io.__name__:\n return True\n return False",
"def is_simple(self): # -> bool:\n ...",
"def stringable(self):\n return True",
"def __bool__(self):\n return _libsbml.string___bool__(self)",
"def __bool__(self):\n return bool(self._items)",
"def __bool__(self):\n return bool(self._items)",
"def is_valid(self, name, value, returnError = False):\n convert = self.expected[name]\n if convert == str:\n # only worry about int, list, etc;\n return True\n try:\n val = ast.literal_eval(value)\n '''\n Safely evaluate an expression node or a Unicode or Latin-1 encoded string containing a Python literal or container display.\n The string or node provided may only consist of the following\n Python literal structures: strings, numbers, tuples, lists, dicts, booleans, and None.\n '''\n except Exception, e:\n if returnError:\n return e\n else:\n print \"Tried %s(%s), but failed.\" % (convert.__name__, value)\n print e\n return False\n else:\n return type(val) == convert",
"def check_tree_type(tree):\n return tree.type in ref",
"def is_sequence(value):\n return (hasattr(value, \"__iter__\") and not\n isinstance(value, (six.string_types, six.binary_type)))",
"def simple(self) -> bool:\n return is_simple(self.string)",
"def is_bool (self, phrase):\r\n \r\n return isinstance(phrase,bool)",
"def is_(t, x):\n return type(x) is t",
"def test_match_types(self):\n f = lws.match_types\n # assert f(str, u'test') is True\n assert f(str, 'test') is True\n assert f(int, 123) is True\n assert f(int, 123.00) is False\n assert f(bool, [1, 2, 3]) is False",
"def is_co(self, astype):\n if isinstance(astype, (tuple, list)):\n return self.package(\"DataStructure\").CO in astype\n\n return astype is self.package(\"DataStructure\").CO",
"def test_types(self):\n assert types.typeClass(\"str\") == str\n\n assert types.isBuiltinType(\"str\")\n\n assert types.isCollectionType(\"map\")\n assert types.isCollectionType(\"seq\")\n assert not types.isCollectionType(\"str\")\n\n assert types.isScalarType(\"str\")\n assert not types.isScalarType(\"seq\")\n assert not types.isScalarType(\"map\")\n\n assert types.isCollection([])\n assert types.isCollection({})\n assert not types.isCollection(\"foo\")\n\n assert types.isScalar(\"\")\n assert types.isScalar(True)\n assert not types.isScalar([])\n\n assert types.isCorrectType(\"\", str)\n assert types.isCorrectType({}, dict)\n\n assert types.isString(\"foo\")\n assert not types.isString([])\n\n assert types.isInt(1)\n assert not types.isInt(\"foo\")\n\n assert types.isBool(True)\n assert not types.isBool(1)\n assert not types.isBool(\"true\")\n\n assert types.isFloat(1.0)\n assert not types.isFloat(\"foo\")\n\n assert types.isNumber(1)\n assert types.isNumber(1.0)\n assert not types.isNumber(\"foo\")\n\n assert types.isText(\"foo\")\n assert types.isText(1)\n assert types.isText(1.0)\n assert not types.isText([])\n assert not types.isText(True)\n\n assert types.isAny(\"foo\")\n assert types.isAny(True)\n assert types.isAny(1)\n assert types.isAny(1.0)\n assert types.isAny({})\n assert types.isAny([])\n\n assert types.isEnum(\"foo\")\n assert not types.isEnum(1)\n\n assert types.isNone(None)\n assert not types.isNone(\"foo\")",
"def isinstance(self, class_or_string):\n if class_or_string is None:\n return False\n import inspect\n if inspect.isclass(class_or_string):\n return isinstance(self, class_or_string)\n else:\n return self.__class__.__name__.lower() == class_or_string.lower()"
] |
[
"0.7596315",
"0.6735085",
"0.6629723",
"0.6603197",
"0.6592958",
"0.65436935",
"0.6490295",
"0.64833474",
"0.6360425",
"0.634218",
"0.6331468",
"0.633145",
"0.63053995",
"0.6277777",
"0.6248238",
"0.6234861",
"0.6221624",
"0.6171583",
"0.61659265",
"0.61386937",
"0.6130888",
"0.61276805",
"0.61137885",
"0.6105598",
"0.61053765",
"0.6099245",
"0.6091279",
"0.6083298",
"0.60821974",
"0.6081491",
"0.6079719",
"0.6070545",
"0.6062296",
"0.6060581",
"0.6059261",
"0.60498077",
"0.6044012",
"0.60219467",
"0.6018104",
"0.6007994",
"0.5973646",
"0.59501404",
"0.59382176",
"0.5935423",
"0.59306973",
"0.5927668",
"0.59100115",
"0.5902767",
"0.5898464",
"0.5897211",
"0.58881986",
"0.5877681",
"0.58562624",
"0.5842545",
"0.5842061",
"0.58413",
"0.582919",
"0.5821535",
"0.58157885",
"0.5799761",
"0.57946366",
"0.57878715",
"0.5784091",
"0.5774087",
"0.57608783",
"0.5760613",
"0.5760189",
"0.575999",
"0.57572484",
"0.57565266",
"0.5750941",
"0.5741655",
"0.5736293",
"0.5731688",
"0.57234544",
"0.5721948",
"0.5705869",
"0.5700954",
"0.5694657",
"0.5690045",
"0.5689646",
"0.56896234",
"0.56732816",
"0.5666759",
"0.5666491",
"0.56663394",
"0.5666177",
"0.5659741",
"0.5656833",
"0.5656833",
"0.56536597",
"0.5649866",
"0.56444454",
"0.5641119",
"0.56388134",
"0.5637591",
"0.56351626",
"0.5622607",
"0.56192774",
"0.5609681"
] |
0.7524484
|
1
|
(class xml.etree.ElementTree.Element) > list Return a list of dictionaries with all Postage information
|
def get_postage_from_response(self, xml_response):
postages = xml_response.find("Package").findall("Postage")
postages_list = []
if postages:
for postage in postages:
postages_list.append(self.get_response_information(postage))
return postages_list
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def xml_to_dict(self):\n medicine_node = ['medicines', 'dose', 'unit', 'unit_price', 'goods_num', 'dose_that',\n 'remark', 'm_usage', 'goods_norms', 'goods_orgin', 'MedPerDos', 'MedPerDay']\n dict_data = {}\n n = 0\n xq_childs = 0\n for child in self.root.iter():\n # print(child.tag)\n # print(child.text)\n if child.tag not in medicine_node:\n if child.tag == 'xq':\n xq_childs = child.__len__() # __len__:返回元素大小,元素的大小为元素的子元素数量\n dict_data[child.tag] = child.text\n else:\n if n < xq_childs:\n dict_data[child.tag] = [child.text]\n n += 1\n else:\n dict_data[child.tag].append(child.text)\n return dict_data",
"def _construct_data_xml(self, xml_file_list):\n award_dict = {}\n award_list = []\n for xml_file in xml_file_list:\n xml_file.seek(0)\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n for response in root:\n temp_dict = {}\n for award in response:\n if award.tag == 'entry':\n continue\n try:\n # temp_dict[award.tag].append(award.text)\n temp_dict[award.tag] = award.text\n except KeyError:\n print(\"KeyError\")\n # temp_dict[award.tag] = [award.text]\n\n # if 'entry' in temp_dict.keys():\n # del temp_dict['entry']\n if len(temp_dict) > 0:\n award_list.append(temp_dict)\n\n return award_list",
"def extract_xml(self, xml_list):\n craziness = dict()\n for i in range(len(xml_list)):\n if xml_list[i]['@type'] == 'EMBL':\n craziness['EMBL']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'RefSeq':\n craziness['RefSeq']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'Ensembl':\n craziness['Ensembl']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'OrthoDB':\n craziness['OrthoDB']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'PROSITE':\n craziness['PROSITE']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'Pfam':\n craziness['Pfam']=(xml_list[i]['@id'])\n return craziness",
"def get_postage_from_response(self, xml_response):\r\n services = xml_response.find(\"Package\").findall(\"Service\")\r\n postages_list = []\r\n\r\n if services:\r\n for postages in services:\r\n postages_list.append(postages.find(\"Postage\").text)\r\n\r\n return postages_list",
"def parse_element(elem):\n return_dict = {}\n for e in elem:\n return_dict[e.tag] = e.text\n return return_dict",
"def dict(self):\n return xmltodict.parse(str(self))",
"def _get_information(self):\n pros_cons = []\n pros_cons_dict = {}\n\n for i, div in enumerate(self._tab.find_all(\"div\")):\n for p in div.find_all(\"p\"):\n pro_con = p.get_text(strip=True)\n pros_cons.append(pro_con)\n pros_cons_dict.update({self._keys_dict[i]: pros_cons})\n pros_cons = []\n\n return pros_cons_dict",
"def xml_children_as_dict(node):\n return dict((e.tag, e.text) for e in node)",
"def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))",
"def _parse_xml(self):\n self.properties = {}\n pages = self.root.findall('page')\n self.pages = {} \n\n for page_num, page in enumerate(pages): \n\n _, _ , width, height = page.attrib[\"bbox\"].split(\",\")\n width, height = float(width), float(height)\n \n page_object = {\"page\": page_num + 1 , \"width\": width, \"height\": height} \n lines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n print(\"{} Number of Lines in Page {}\".format(len(lines), page_num))\n \n self.bbox = {'x1': [] , 'y1':[], 'x2':[], 'y2':[]}\n textlines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n textlines = sorted(textlines, key= lambda x: -float(x.attrib['bbox'].split(',')[3]))\n \n \n line_objects = []\n for idx, item in enumerate(textlines):\n item_props = self._extract_textline_properties(item)\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3])\n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n\n line_objects.append(item_props)\n page_object[\"lines\"] = line_objects\n \n \n others = [] \n# for key in [\"rect\", \"figure\", \"layout/textgroup\", \"curve\"]: \n for key in [\"curve\", \"rect\", \"figure\"]: \n other_objs = self.root.findall('page[@id=\\'{}\\']/{}'.format(page_num+1, key)) \n for idx, item in enumerate(other_objs):\n \n item_props = {\"type\": key}\n# print(key, ET.tostring(item))\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3]) \n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n others.append(item_props)\n \n page_object[\"others\"] = others\n page = Page(page_object)\n page_object[\"para\"] = page.para\n page_object[\"plines\"] = page.lines\n page_object[\"bigbox\"] = page.bigbox\n page_object[\"components\"] = page.components\n\n self.pages[page_num+1] = page_object",
"def _get_information(self):\n reviews = self._tab.find_all(\"div\", class_=\"review\", attrs={'itemprop': 'review'})\n return [(self._get_review(elem), self._get_published_date(elem)) for elem in reviews]",
"def xmlpost_to_dict(post):\n\n tree = ET.parse(post)\n root = tree.getroot()\n msg = root.find('message')\n\n post_data = {}\n\n board_id = msg.find('board_id')\n post_data['board_id'] = int(board_id.text)\n\n root_post = msg.find('root').attrib['href']\n post_data['root_post'] = root_post.split('/')[-1]\n\n kudos = msg.find('kudos')\n count = kudos.find('count')\n post_data['kudos_count'] = int(count.text)\n\n edit_author_id = msg.find('last_edit_author').attrib['href']\n post_data['edit_author_id'] = int(edit_author_id.split('/')[-1])\n\n post_time = msg.find('post_time')\n post_data['post_time'] = post_time.text\n\n last_edit_time = msg.find('last_edit_time')\n post_data['last_edit_time'] = last_edit_time.text\n\n body = msg.find('body')\n post_data['body'] = body.text\n\n thread = msg.find('thread').attrib['href']\n post_data['thread'] = int(thread.split('/')[-1])\n\n board = msg.find('board').attrib['href']\n post_data['board'] = board.split('/')[-1]\n\n try:\n parent_post = msg.find('parent').attrib['href']\n post_data['parent_post'] = int(parent_post.split('/')[-1])\n except KeyError:\n post_data['parent_post'] = None\n\n views = msg.find('views')\n post_data['views'] = int(views.find('count').text)\n\n subject = msg.find('subject')\n post_data['subject'] = subject.text\n\n post_id = msg.find('id')\n post_data['post_id'] = int(post_id.text)\n\n author_id = msg.find('author').attrib['href']\n post_data['author_id'] = int(author_id.split('/')[-1])\n\n return post_data",
"def to_dict(xml):\n children = xml.getchildren()\n if not children:\n return xml.text\n else:\n out = {}\n for node in xml.getchildren():\n if node.tag in out:\n if not isinstance(out[node.tag], list):\n out[node.tag] = [out[node.tag]]\n out[node.tag].append(to_dict(node))\n else:\n out[node.tag] = to_dict(node)\n return out",
"def _get_elements(self):\n address_elements = {\n 'organisation': \"{}{}\".format(\n self.organisation if self.organisation else \"\",\n '\\n' + self.department if self.department else \"\",\n ),\n 'sub-building name': self.sub_building_name,\n 'building name': self.building_name,\n 'building number': self.building_number,\n 'PO box': self.po_box_num,\n 'dependent thoroughfare': self.dependent_thoroughfare,\n 'thoroughfare': self.thoroughfare,\n 'double dependent locality': self.double_dependent_locality,\n 'dependent locality': self.dependent_locality,\n 'town': self.town,\n 'postcode': \"{} {}\".format(\n self.postcode[:-3], \n self.postcode[-3:]\n ),\n 'concatenation indicator': self.concatenation_indicator\n }\n return address_elements",
"def get_attachments(xml):\r\n items = get_items(xml)\r\n names = {}\r\n attachments = []\r\n\r\n for item in items:\r\n kind = item.find('post_type').string\r\n filename = item.find('post_name').string\r\n post_id = item.find('post_id').string\r\n\r\n if kind == 'attachment':\r\n attachments.append((item.find('post_parent').string,\r\n item.find('attachment_url').string))\r\n else:\r\n filename = get_filename(filename, post_id)\r\n names[post_id] = filename\r\n attachedposts = {}\r\n for parent, url in attachments:\r\n try:\r\n parent_name = names[parent]\r\n except KeyError:\r\n #attachment's parent is not a valid post\r\n parent_name = None\r\n\r\n try:\r\n attachedposts[parent_name].append(url)\r\n except KeyError:\r\n attachedposts[parent_name] = []\r\n attachedposts[parent_name].append(url)\r\n return attachedposts",
"def get_tags(element):\n tags = []\n id_num = element.attrib['id']\n for child in element.iter('tag'):\n attr = child.attrib\n\n # check for problematic characters first and skip if matches\n if PROBLEMCHARS.search(attr['k']):\n continue\n\n child_dict = {}\n child_dict['id'] = id_num\n child_dict['value'] = attr['v']\n\n # stackoverflow.com/questions/6903557/splitting-on-first-occurrence\n child_dict['key'] = attr['k'].split(':', 1)[-1]\n\n # Check if the k tag has : in it and treat according to specs\n if LOWER_COLON.search(attr['k']):\n child_dict['type'] = attr['k'].split(':')[0]\n else:\n child_dict['type'] = default_tag_type\n\n # street name check (not all : matches are addr:)\n if child_dict['type'] == 'addr' & child_dict['key'] == 'street':\n child_dict['value'] = update_street_name(child_dict['value'])\n\n tags.append(child_dict)\n\n return tags",
"def getXmlDict(oxml):\n lines = oxml.split(\"\\n\")\n rrd_d = {}\n # <cf> AVERAGE </cf>\n # <pdp_per_row> 288 </pdp_per_row> <!-- 86400 seconds -->\n\n # parse xml file\n key = \"\"\n rows = [] \n for line in lines:\n if (reMatchCF(line)):\n cf = line.split()[1]\n key += cf\n if (reMatchPDP(line)):\n pdp = line.split()[1]\n key += pdp\n if (reMatchRow(line)):\n ele = line.split()\n time = ele[5]\n val = ele[8]\n rows.append([time,val,line])\n # end of rra is reached, store to dict and rest vals\n if (reMatchDBEnd(line) and key and rows):\n rrd_d[key] = rows\n key = \"\"\n rows = []\n return rrd_d",
"def _xml_convert(self, element):\n\n children = list(element)\n\n if len(children) == 0:\n return self._type_convert(element.text)\n else:\n # if the fist child tag is list-item means all children are list-item\n if children[0].tag == \"list-item\":\n data = []\n for child in children:\n data.append(self._xml_convert(child))\n else:\n data = {}\n for child in children:\n data[child.tag] = self._xml_convert(child)\n\n return data",
"def pitems(self):\n return self.palues().root()",
"def pitems(self):\n return self.palues().root()",
"def post_data(driver):\n post_info = {\n \"post_age\" : \"li.posted\", \n \"page_views\" : \"ul.posting-info li.views\"\n }\n for key, selector in post_info.items():\n try:\n text = driver.find_element_by_css_selector(selector).text\n if key == \"post_age\":\n post_info[key] = parse_post_age(text)\n else:\n post_info[key] = ''.join(list(filter(lambda c: c.isdigit(), text)))\n except Exception as e:\n post_info[key] = \"\"\n pass\n return post_info",
"def extract_node(element, node_attr_fields = NODE_FIELDS, problem_chars=PROBLEMCHARS, default_tag_type='regular') :\r\n attribs = {}\r\n tags = []\r\n\r\n \"\"\" Extraction Routine\"\"\"\r\n for key in node_attr_fields:\r\n attribs[key] = element.attrib[key]\r\n for tag in element.iter(\"tag\"):\r\n node_tag = {}\r\n node_tag[\"type\"] = default_tag_type\r\n node_tag[\"id\"] = attribs[\"id\"]\r\n node_tag[\"value\"] = tag.attrib[\"v\"]\r\n\r\n k = tag.attrib[\"k\"]\r\n\r\n if problem_chars.search(k):\r\n continue\r\n elif \":\" in k:\r\n node_tag[\"key\"] = k.split(\":\", 1)[1]\r\n node_tag[\"type\"] = k.split(\":\", 1)[0]\r\n else:\r\n node_tag[\"key\"] = k\r\n\r\n # Update city name , if any, before appending the dictionary in list\r\n\r\n if node_tag[\"key\"] == \"city\":\r\n node_tag[\"value\"] = update_city_name(node_tag[\"value\"])\r\n\r\n # Update street name, if any , as per mapping\r\n\r\n if node_tag[\"key\"] == \"street\" or \"street:name\":\r\n node_tag[\"value\"] = update_street_name(node_tag[\"value\"], mapping)\r\n\r\n # Check if postcode is valid, if invalid prefix the postcode value with 'fixme:'\r\n\r\n if node_tag[\"key\"] == \"postcode\":\r\n invalid, node_tag[\"value\"] = update_postcode(node_tag[\"value\"])\r\n if invalid:\r\n node_tag[\"value\"] = 'fixme:' + node_tag[\"value\"]\r\n\r\n\r\n tags.append(node_tag)\r\n\r\n return {'node': attribs, 'node_tags': tags}",
"def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects",
"def convertXmlToProtein(self, xml):\n\t\t# XML to dictionary\n\t\tproteinObject = Protein()\n\t\t\n\t\tdictionary = xmltodict.parse(xml)\n\t\troot = dictionary[\"uniprot\"]\n\t\tentry = root[\"entry\"]\n\t\t\n\t\tfor element, value in entry.items():\n\t\t\tif element == \"@accession\":\n\t\t\t\tproteinObject.addAttribute(\"id\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"name\":\n\t\t\t\tproteinObject.addAttribute(\"proteinShortName\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"protein\":\n\t\t\t\tfullname = value[\"recommendedName\"][\"fullName\"]\n\t\t\t\tproteinObject.addAttribute(\"proteinFullName\", \"uniprot\", fullname)\n\t\t\t\t\n\t\t\tif element == \"@created\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"creationDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\t\n\t\t\tif element == \"@modified\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"modifiedDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\n\t\t\tif element == \"comment\":\n\t\t\t\tfor comment in entry[\"comment\"]:\n\t\t\t\t\tif \"text\" in comment:\n\t\t\t\t\t\ttext = comment[\"text\"][\"#text\"] if isinstance(comment[\"text\"], OrderedDict) else comment[\"text\"]\n\t\t\t\t\t\tproteinObject.addAttribute(comment[\"@type\"], \"uniprot\",text)\n\t\t\t\t\t\n\t\t\tif element == \"gene\":\n\t\t\t\tgenes = []\n\t\t\t\tfor gene in value[\"name\"]:\n\t\t\t\t\tif \"#text\" in gene and isinstance(gene, OrderedDict):\n\t\t\t\t\t\tgenes.append(gene[\"#text\"])\n\t\t\t\t\t\n\t\t\t\tproteinObject.addAttribute(\"geneName\", \"uniprot\", genes)\n\t\t\t\t\t\n\t\t\tif element == \"organism\":\n\t\t\t\tif isinstance(value[\"name\"], list):\n\t\t\t\t\torganisms = []\n\t\t\t\t\tfor organism in value[\"name\"]:\n\t\t\t\t\t\torganisms.append(organism[\"#text\"])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tproteinObject.addAttribute(\"organism\", \"uniprot\", value[\"name\"][\"#text\"])\n\t\t\t\t\n\t\t\t\n\t\t\tif element == \"sequence\":\n\t\t\t\tproteinObject.addAttribute(\"sequence\", \"uniprot\",value[\"#text\"].replace(\"\\n\",\"\"))\n\t\t\t\tproteinObject.addAttribute(\"sequencelength\", \"uniprot\",value[\"@length\"].replace(\"\\n\",\"\"))\n\n\n\t\treturn proteinObject",
"def get_data(tree_elem):\n fly_lst = []\n for element in tree_elem:\n for elem in element.xpath('td/label/div[1]/span'):\n fly_dict = dict()\n fly_info_lst = [item.strip() for item in elem.xpath('@title')[0].split(',')]\n class_cost_lst = fly_info_lst[3].split(':')\n fly_dict['dep/arv'] = fly_info_lst[1]\n fly_dict['dur'] = fly_info_lst[2]\n fly_dict['class'] = class_cost_lst[0]\n fly_dict['cost'] = get_price(class_cost_lst[1])\n fly_lst.append(fly_dict)\n return fly_lst",
"def get_items_from_element(element):\n data = {'element': element,\n 'items': []}\n for item in element[len(element)-1]:\n item_info = {'data': item.items(),\n 'tag': item.tag,\n 'keys': item.keys()}\n data['items'].append(item_info)\n return data",
"def retrieve_ext_list(self, puid_list):\n xml_iter = self._parse_xml()\n puiddict = {}\n for topelements in xml_iter:\n if (\n topelements.tag\n == \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}FileFormatCollection\"\n ):\n for fileformats in topelements:\n puid = fileformats.get(\"PUID\")\n for puids in puid_list:\n if puids != puid:\n continue\n ext = fileformats.find(\n \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}Extension\"\n )\n if ext is not None:\n # Return the first file format extension.\n puiddict[puids] = ext.text\n break\n puiddict[puids] = None\n break\n notfound = []\n for puid in puid_list:\n if puid not in puiddict:\n if puid not in notfound:\n notfound.append(puid)\n if len(notfound) > 0:\n for puid in notfound:\n puiddict[puid] = \"notfound\"\n return puiddict",
"def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict",
"def kgml_parser(self, kegg_cpd_id_list):\n result_dic = dict()\n # try:\n kg_tree = et.fromstring(self.kgml)\n for cpd in kegg_cpd_id_list:\n for el in kg_tree.iterfind('entry/graphics[@name=\"%s\"]' % cpd):\n if cpd not in result_dic.keys():\n result_dic[cpd] = [(el.get('x'), el.get('y'))]\n else:\n result_dic[cpd].append((el.get('x'), el.get('y')))\n # except:\n # # todo error exception\n # print 'error while parsing kgml of %s' % self.kegg_id\n return result_dic",
"def get_pcr_sequences(self):\n d = {}\n for analyser in self.xml_tree.getroot():\n for child in analyser:\n if child.tag == 'all-assays':\n for assay in child:\n attributes = assay.attrib\n assay_id = attributes['id']\n if re.match(r'rs\\d+', assay_id):\n d[assay_id] = [attributes['pcr1'], attributes['pcr2']]\n return d",
"def get_proplist(self, naam):\r\n h = self._root.find(naam)\r\n if h is None:\r\n h = []\r\n else:\r\n hh = h.findall(\"regel\")\r\n h = []\r\n for x in hh:\r\n if x.text is None:\r\n h.append(\"\")\r\n else:\r\n h.append(x.text.rstrip())\r\n return h",
"def parse_book_record(root) -> dict:\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//BookTitle/text()\")))\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Book/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Book/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Book/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n return doc",
"def get_elements():\n elements = { 'Shapes':[], 'Strokes':[] }\n curves_knob = rp_node['curves']\n root_layer = curves_knob.rootLayer\n elements = parse_layer(root_layer, elements, [root_layer])\n print elements",
"def xml_para_dicionario(self, string, multi=False):\r\n try:\r\n\t root = xml.fromstring(string)\r\n itens = []\r\n for cclass in root:\r\n mykeys = []\r\n myvalues = []\r\n for item in cclass:\r\n children = item.getchildren() # Em caso de tags encadeadas\r\n if children:\r\n for child in children:\r\n mykeys.append(child.tag.lower())\r\n myvalues.append(child.text )\r\n\t else:\r\n mykeys.append(item.tag.lower())\r\n myvalues.append(item.text )\r\n it = dict(zip(mykeys, myvalues))\r\n itens.append(it)\r\n\r\n if multi: # Retorna uma lista de dicionarios \r\n return itens\r\n return itens[0] # Retorna apenas um dicionario\r\n\r\n\texcept Exception, e:\r\n return None",
"def recursive_parse_xml_to_dict(xml):\n if not xml:\n return {xml.tag: xml.text}\n result = {}\n for child in xml:\n child_result = recursive_parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}",
"def elem2dict(node):\n result = {}\n\n for element in node.iterchildren():\n # Remove namespace prefix\n key = element.tag.split('}')[1] if '}' in element.tag else element.tag\n key = key[:1].lower() + key[1:]\n\n # Process element as tree element if the inner XML contains non-whitespace content\n if element.text and element.text.strip():\n value = element.text\n else:\n value = elem2dict(element)\n if key in result:\n if type(result[key]) is list:\n result[key].append(value)\n else:\n tempvalue = result[key].copy()\n result[key] = [tempvalue, value]\n else:\n result[key] = value\n return result",
"def _parse_political_posts(self):\n functions = [\n h\n for h in self.right_column.find_all(\"h4\")\n if h.text == \"Politische Funktionen\"\n ]\n if functions:\n functions = functions[0].nextSibling.nextSibling.find_all(\"li\")\n # TODO: Can we do better than just taking the whole string?\n return {\"posts\": self._get_current_and_former(functions)}\n return {}",
"def get_data_from_bs(bs_data):\n all_raw_data = bs_data.find_all(\"properties\")\n return (to_dict(tag for tag in raw if type(tag) is Tag) for raw in all_raw_data)",
"def parseX(self):\n\t\treturn self._dictOut.keys()",
"def recursive_parse_xml_to_dict(xml):\n if not xml:\n return {xml.tag: xml.text}\n result = {}\n for child in xml:\n child_result = recursive_parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}",
"def parseX(self):\n return self.dictOut.keys()",
"def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict",
"def get_devices_information():\n global nipper_xml\n devices = {}\n\n for device in nipper_xml.findall('./information/devices/device'):\n if DEBUG:\n print \"\\t\" + note + \"Name: %s\" % device.get('name')\n print \"\\t\" + note + \"Type: %s\" % device.get('type')\n print \"\\t\" + note + \"OS: %s\" % device.get('os')\n print \"\\t\" + note + \"OS Version: %s\" % device.get('osversion')\n devices[device.attrib.get('name')] = {'name': device.get('name'),\n 'type': device.get('type'),\n 'os': device.get('os'),\n 'osversion': device.get('osversion')}\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def load_infos(self):\n xml = self.api.photos_getInfo(photo_id=self.id)\n xml = xml.find(\"photo\")\n out = xml.attrib\n out[\"title\"] = xml.find(\"title\").text\n out[\"description\"] = xml.find(\"description\").text\n out[\"dates\"] = xml.find(\"dates\").attrib\n\n # Load urls\n out[\"urls\"] = {}\n for url_xml in xml.find(\"urls\").findall(\"url\"):\n out[\"urls\"][url_xml.attrib[\"type\"]] = url_xml.text\n\n # Load tags\n out[\"tags\"] = []\n for tag_xml in xml.find(\"tags\").findall(\"tag\"):\n tag = tag_xml.attrib\n tag[\"tag\"] = tag_xml.text\n out[\"tags\"].append(tag)\n\n return out",
"def get_agencies():\n\n xml_query_string = 'http://webservices.nextbus.com/service/publicXMLFeed?command=agencyList'\n xml_request = requests.get(xml_query_string)\n agencies = {}\n root = ET.fromstring(xml_request.text)\n\n for child in root:\n agencies[child.attrib['tag']] = child.attrib['title']\n return agencies",
"def info(self) -> dict:\n xml_path = self.xml_path.format(id=self.id)\n p_id = int(ET.parse(xml_path).find('pattern_id').text)\n defect_flag = bool(int(ET.parse(xml_path).find('defective').text))\n info = {'pattern_id': p_id, 'id': self.id, 'defective': defect_flag}\n return info",
"def wp2fields(xml, wp_custpost=False):\r\n\r\n items = get_items(xml)\r\n for item in items:\r\n\r\n if item.find('status').string == \"publish\":\r\n\r\n try:\r\n # Use HTMLParser due to issues with BeautifulSoup 3\r\n title = HTMLParser().unescape(item.title.contents[0])\r\n except IndexError:\r\n title = 'No title [%s]' % item.find('post_name').string\r\n logger.warning('Post \"%s\" is lacking a proper title' % title)\r\n\r\n filename = item.find('post_name').string\r\n post_id = item.find('post_id').string\r\n filename = get_filename(filename, post_id)\r\n\r\n content = item.find('encoded').string\r\n raw_date = item.find('post_date').string\r\n date_object = time.strptime(raw_date, \"%Y-%m-%d %H:%M:%S\")\r\n date = time.strftime(\"%Y-%m-%d %H:%M\", date_object)\r\n author = item.find('creator').string\r\n\r\n categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]\r\n # caturl = [cat['nicename'] for cat in item.find(domain='category')]\r\n\r\n tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]\r\n\r\n kind = 'article'\r\n post_type = item.find('post_type').string\r\n if post_type == 'page':\r\n kind = 'page'\r\n elif wp_custpost:\r\n if post_type == 'post':\r\n pass\r\n # Old behaviour was to name everything not a page as an article.\r\n # Theoretically all attachments have status == inherit so\r\n # no attachments should be here. But this statement is to\r\n # maintain existing behaviour in case that doesn't hold true.\r\n elif post_type == 'attachment':\r\n pass\r\n else:\r\n kind = post_type\r\n yield (title, content, filename, date, author, categories, tags,\r\n kind, \"wp-html\")",
"def readWarp(self):\n warpDict = {}\n for warpAxisElement in self.root.findall(\".warp/axis\"):\n axisName = warpAxisElement.attrib.get(\"name\")\n warpDict[axisName] = []\n for warpPoint in warpAxisElement.findall(\".map\"):\n inputValue = float(warpPoint.attrib.get(\"input\"))\n outputValue = float(warpPoint.attrib.get(\"output\"))\n warpDict[axisName].append((inputValue, outputValue))\n self.warpDict = warpDict",
"def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements",
"def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())",
"def parseX(self):\n return self._dictOut.keys()",
"def rss_attributes(self):\n return {u\"version\": self._version,\n u\"xmlns:media\": u\"http://search.yahoo.com/mrss/\",\n \"xmlns:atom\": u\"http://www.w3.org/2005/Atom\"\n }",
"def _get_sideinfo(self, content_tag):\n\n # dictionary that used to store all the relevant information\n # regarding an apartment\n sideinfo = {} \n try:\n # main content of all the relavent features \n apt_info_tags = content_tag.find_all('div', class_='flex flex-col pr-8')\n \n for apt_tag in apt_info_tags:\n # construct (key, value) pair for the dictionary \n key = apt_tag.find('div', class_='data-name') \\\n .get_text() \\\n .strip()\n\n value = apt_tag.find('div', class_='data-value') \\\n .get_text() \\\n .strip()\n try:\n value = self._extract_num(value)\n except:\n pass\n\n # fill in the dictionary\n sideinfo[key] = value\n\n return sideinfo\n except:\n return sideinfo",
"def etree2dict(element):\n i = dict(element.items())\n i.update(_make_content(i, element.text, strip=True))\n\n for child in element:\n tag = child.tag\n value = etree2dict(child)\n i.update(_make_content(i, value, tag))\n\n if element.text and not set(i).difference([\"content\"]):\n # element is leaf node and doesn't have attributes\n i = i.get(\"content\")\n\n return i",
"def make_books_dicts(xml, book_list):\n\n books_response = xml.GoodreadsResponse.reviews.review\n for book in books_response:\n a_book = {}\n a_book['title'] = book.book.title.cdata.encode('utf8')\n a_book['author_name'] = book.book.authors.author.name.cdata.encode('utf8')\n a_book['author_gr_id'] = int(book.book.authors.author.id.cdata.encode('utf8'))\n a_book['gr_work_id'] = int(book.book.work.id.cdata.encode('utf8'))\n a_book['description'] = book.book.description.cdata\n\n a_book['edition'] = {}\n a_book['edition']['isbn'] = valid_isbn(book.book.isbn.cdata.encode('utf8'))\n a_book['edition']['format_id'] = get_format_id(book.book.format.cdata.encode('utf8'))\n a_book['edition']['pic_url'] = book.book.image_url.cdata.encode('utf8')\n a_book['edition']['publisher'] = book.book.publisher.cdata.encode('utf8')\n a_book['edition']['gr_url'] = book.book.link.cdata.encode('utf8')\n a_book['edition']['gr_id'] = int(book.book.id.cdata.encode('utf8'))\n year = date_is_valid(book.book.publication_year.cdata.encode(\"utf8\"))\n month = date_is_valid(book.book.publication_month.cdata.encode(\"utf8\"))\n day = date_is_valid(book.book.publication_day.cdata.encode(\"utf8\"))\n a_book['edition']['date'] = datetime.date(year, month, day)\n a_book['edition']['num_pages'] = valid_page_count(book.book.num_pages.cdata.encode('utf8'))\n book_list.append(a_book)\n\n print \"*******THERE ARE \" + str(len(book_list)) + \" ON THIS SHELF*******\"\n\n return book_list",
"def parse(self):\n p = feedparser.parse(self.xml)\n self.p = p\n return p",
"def as_dict(self):\n return {'name': self.name, 'attrs': self.attrs_dict, 'children': self.tags_dict}",
"def parse_xml(file_name):\n events = (\"start\", \"end\")\n has_start = False\n json_dict = dict()\n # Traverse the XML\n for event, element in ET.iterparse(file_name, events=events, encoding=\"utf-8\", load_dtd=True, recover=True):\n print(event, element.tag, element.text)\n # Article node: initialize variables\n if event == 'start' and element.tag in INCLUDE_ARTICLES:\n has_start = True\n # Each article node has an unique attribute key\n publication_key = element.attrib['key']\n authors = list()\n publication_year = ''\n publication_type = str(element.tag)\n publication_title = ''\n # Author node\n elif event == 'start' and element.tag == 'author' and has_start:\n no_accent = lambda x: unidecode.unidecode(x) if x is not None else x\n authors.append(no_accent(element.text))\n # Title node\n elif event == 'start' and element.tag == 'title' and has_start:\n publication_title = element.text\n # Year node\n elif event == 'start' and element.tag == 'year' and has_start:\n publication_year = element.text\n # End article node: save information. This will never execute before initializing all of the variables\n elif has_start and event == 'end' and element.tag in INCLUDE_ARTICLES:\n json_dict[publication_key] = {\n '_id': publication_key,\n 'authors': authors,\n 'title': publication_title,\n 'year': publication_year,\n 'type': publication_type}\n has_start = False\n element.clear()\n else:\n # Remove element (otherwise there will be memory issues due to file size)\n element.clear()\n continue\n\n return json_dict",
"def _xmlTreeToDict(cls, node):\n if not isinstance(node, ElementTree.Element):\n raise ATException('_xmlTreeToDict(), param: [node] expected a xml.etree.ElementTree.Element')\n\n nodeDict = {}\n\n if len(node.items()) > 0:\n nodeDict.update(dict(node.items()))\n\n for child in node:\n childItemDict = cls._xmlTreeToDict(child)\n if child.tag in nodeDict:\n if isinstance(nodeDict[child.tag], list):\n nodeDict[child.tag].append(childItemDict)\n else:\n nodeDict[child.tag] = [nodeDict[child.tag], childItemDict]\n else:\n nodeDict[child.tag] = childItemDict\n\n text = ''\n if node.text is not None:\n text = node.text.strip()\n\n if len(nodeDict) > 0:\n if len(text) > 0:\n nodeDict[node.tag + '_text'] = text\n else:\n nodeDict = text\n\n return nodeDict",
"def parse(k):\n return stringify_children(xml_object.xpath(k)[0])",
"def get_tags():\n xml_tree = ET.parse(RSS_FEED)\n root = xml_tree.getroot()\n items = root.getchildren()[0].getchildren()\n\n tags = {}\n for item in items:\n children = item.getchildren()\n for child in children:\n if child.tag == \"category\":\n tag = child.text.replace(\"-\", \" \").lower()\n tags[tag] = tags.get(tag, 0) + 1\n\n return tags",
"def infos_serie(self):\n if self._root is None:\n return\n\n infos = {}\n serie = self._root.find('Series')\n infos['firstAired'] = serie.find('FirstAired').text\n infos['description'] = unicode(serie.find('Overview').text)\n infos['lastUpdated'] = int(serie.find('lastupdated').text)\n return infos",
"def extract_posts(posts_file, output_filename=direc+\"/posts.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting posts from \" + posts_file + \"...\")\r\n posts_dict = {}\r\n with open(output_filename, 'w', encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(posts_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n line = \"\"\r\n if child.attrib['PostTypeId'] == '1' and 'AcceptedAnswerId' in child.attrib:\r\n posts_dict[child.attrib['Id']] = {'accepted': child.attrib['AcceptedAnswerId'], 'other': []}\r\n clean_title = clean_markdown(child.attrib['Title'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + clean_title + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n elif child.attrib['PostTypeId'] == '2':\r\n if child.attrib['ParentId'] in posts_dict and not child.attrib['Id'] == posts_dict[child.attrib['ParentId']]['accepted']:\r\n posts_dict[child.attrib['ParentId']]['other'].append(child.attrib['Id'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['ParentId'] + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n f.write(line)\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting posts from \" + output_filename + \".\\n\")\r\n return posts_dict",
"def get_tags(self,element):\n if element in self.element2tags.keys():\n return self.element2tags[element]\n return []",
"def xml_to_dict(args):\n rdict = dict()\n args = re.sub(r'xmlns=\\\".+?\\\"', '', args)\n root = ET.fromstring(args)\n ifmtrunk = root.find('.//ifmtrunk')\n if ifmtrunk is not None:\n try:\n ifmtrunk_iter = ET.Element.iter(ifmtrunk)\n except AttributeError:\n ifmtrunk_iter = ifmtrunk.getiterator()\n\n for ele in ifmtrunk_iter:\n if ele.text is not None and len(ele.text.strip()) > 0:\n rdict[ele.tag] = ele.text\n return rdict",
"def parse(k):\r\n return stringify_children(xml_object.xpath(k)[0])",
"def dom2dict(element):\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))",
"def get_attrs_dict(self, root_element):\n attr_elements = root_element.findall(\"attribute\")\n attrs_dict = {}\n for el in attr_elements:\n attrs_dict[el.attrib[\"name\"]] = {\n \"value\": el.attrib[\"value\"],\n \"type\": el.attrib.get(\"type\", None)\n }\n return attrs_dict",
"def children(self):\n try:\n attr_name = 'Parrot_%s_attributes' % self.pmc_name\n attr_type = gdb.lookup_type(attr_name).pointer()\n\n attrs = self.val['data'].cast(attr_type).dereference()\n\n '''\n Something ridiculous happens here. I take a list of tuples:\n [ (\"key1\", \"val1\"), (\"key2\", \"val2\") ]\n\n and turn it, in one iteration, into:\n [\n [(\"name\", \"key1\"), (\"value\", \"val1\")],\n [(\"name\", \"key2\"), (\"value\", \"val2\")]\n ]\n\n That, in turn, is mutated into one list.\n [\n (\"name\", \"key1\"), (\"value\", \"val1\"),\n (\"name\", \"key2\"), (\"value\", \"val2\")\n ]\n\n What we go through for 100% lazy iteration.\n '''\n name_value_tuples = PMCIterator(attrs)\n nv_iter = itertools.imap(lambda val: [ (\"name\", val[0]), (\"value\", val[1]) ],\n name_value_tuples)\n nv_chain = itertools.chain.from_iterable(nv_iter)\n\n return nv_chain\n except RuntimeError as e:\n return [ ( \"__ERROR__\", \"\" ) ].__iter__()",
"def xml2dict( xml, sanitize=True, prefix=None):\n \n \n #Decode to avert parsing errors as some software dump large text\n #fields into the file that occasionally contain erronious chars\n xml=xml.decode('utf-8', errors='ignore')\n\n \n return etree2dict(etree.fromstring(xml), sanitize, prefix)",
"def parseX(self):\n\t\treturn self._dict.keys()",
"def serialize(self):\n child_dict = OrderedDict()\n for attr, item in iteritems(self._contents):\n child_dict[attr] = item.serialize()\n return child_dict",
"def parse(tree, callback):\n doc_attrs = tree.attrib\n bibliographic = tree.find(\n '{http://www.epo.org/exchange}bibliographic-data')\n family = tree.find('{http://www.epo.org/exchange}patent-family')\n title = bibliographic.findall(\n '{http://www.epo.org/exchange}invention-title')\n callback({\n 'title': fmap(lambda x: {'title': x.text, 'lang': x.attrib.get('lang', ''), 'data-format': x.attrib.get('data-format', '')}, title),\n 'country': doc_attrs.get('country', ''),\n 'status': doc_attrs.get('status', ''),\n # identical to <doc-number> in <publication-reference> (http://documents.epo.org/projects/babylon/eponet.nsf/0/6266D96FAA2D3E6BC1257F1B00398241/$File/T09.01_ST36_User_Documentation_vs_2.5.7_en.pdf)\n 'doc-number': doc_attrs.get('doc-number', ''),\n # identical to <kind> in <publication-reference> (http://documents.epo.org/projects/babylon/eponet.nsf/0/6266D96FAA2D3E6BC1257F1B00398241/$File/T09.01_ST36_User_Documentation_vs_2.5.7_en.pdf)\n 'kind': doc_attrs.get('kind', ''),\n # 5.3.1. Attribute \"doc-id\" (http://documents.epo.org/projects/babylon/eponet.nsf/0/6266D96FAA2D3E6BC1257F1B00398241/$File/T09.01_ST36_User_Documentation_vs_2.5.7_en.pdf)\n 'doc-id': doc_attrs.get('doc-id', ''),\n 'date-publ': doc_attrs.get('date-publ', ''),\n 'family-id': doc_attrs.get('family-id', ''),\n 'family-members': familymembers(family),\n 'parties': parties(bibliographic),\n 'citations': citations(bibliographic),\n 'classifications': classifications(bibliographic)\n })",
"def extract_staxml_info(staxml):\n instruments = defaultdict(dict)\n\n if isinstance(staxml, Inventory):\n inv = staxml\n else:\n if os.path.isfile(staxml):\n inv = safe_load_staxml(staxml)\n else:\n raise ValueError(\"Input staxml is neither obspy.Inventory or \"\n \"staxml file\")\n for nw in inv:\n nw_code = nw.code\n for sta in nw:\n sta_code = sta.code\n for chan in sta:\n chan_code = chan.code\n loc_code = chan.location_code\n key = \"%s.%s.%s.%s\" % (nw_code, sta_code, loc_code, chan_code)\n instruments[key][\"latitude\"] = chan.latitude\n instruments[key][\"longitude\"] = chan.longitude\n instruments[key][\"elevation\"] = chan.elevation\n instruments[key][\"depth\"] = chan.depth\n if chan.sensor.description is not None:\n sensor_type = chan.sensor.description\n elif chan.sensor.type is not None:\n sensor_type = chan.sensor.type\n else:\n sensor_type = \"None\"\n instruments[key][\"sensor\"] = sensor_type\n\n return instruments",
"def ParseXML(self, rawXML):\n if \"Search error: API limited due to abuse\" in str(rawXML.items()):\n raise Rule34_Error('Rule34 rejected your request due to \"API abuse\"')\n\n d = {rawXML.tag: {} if rawXML.attrib else None}\n children = list(rawXML)\n if children:\n dd = defaultdict(list)\n for dc in map(self.ParseXML, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {rawXML.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}\n if rawXML.attrib:\n d[rawXML.tag].update(('@' + k, v) for k, v in rawXML.attrib.items())\n if rawXML.text:\n text = rawXML.text.strip()\n if children or rawXML.attrib:\n if text:\n d[rawXML.tag]['#text'] = text\n else:\n d[rawXML.tag] = text\n return d",
"def parse(filename):\n\n tree = etree.parse(filename)\n root = tree.getroot()\n # according to the structure of the xml article meta nested under \n # front then article-meta\n articleMeta = root[0][1]\n # pubmed central article id\n pmcId = ''\n # the author list, the list of names excluding corresponding\n # athor\n otherAuthors = []\n # the name and email of the corresponding authors\n cAuthors = []\n # container for all the author groups\n authorGroups = []\n \n for child in articleMeta:\n # find the pmc id\n if ((child.tag == 'article-id') and not(isEmpty(child.attrib))):\n if (child.attrib['pub-id-type'] == 'pmc'):\n pmcId = child.text\n # find the author group\n elif (child.tag == 'contrib-group'):\n authorGroups.append(child)\n # this child may contain important corresponding information\n elif (child.tag == 'author-notes'):\n authorNotes = child\n # find the publication date\n elif (child.tag == 'history'):\n for theDate in child:\n if ('date-type' in theDate.attrib and theDate.attrib['date-type'] == 'accepted'):\n #publiction date YEAR MONTH DAY\n if (theDate.find('year') != None):\n theYear = theDate.find('year').text\n else:\n theYear = 0\t\n if (theDate.find('month') != None):\n theMonth = theDate.find('month').text\n else:\n theMonth = 6\n if (theDate.find('day') != None):\n theDay = theDate.find('day').text\n else:\n theDay = 1\n\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n elif (child.tag == 'pub-date'): \n if ('pub-type' in child.attrib and (child.attrib['pub-type'] == 'ppub' or child.attrib['pub-type'] == 'epub')):\n #for grandchild in child: print(grandchild.tag)\n \n if (child.find('year') != None):\n theYear = child.find('year').text\n else:\n theYear = 0\n \n if (child.find('month') != None):\n theMonth = child.find('month').text\n else:\n theMonth = 6\n \n if (child.find('day') != None):\n theDay = child.find('day').text\n else:\n theDay = 1\t\t\t\t\t\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n case1 = False # will be used for post-processing, corr author identified but no email\n for authorGroup in authorGroups:\n # parse author group information\n for child in authorGroup:\n if (child.tag == 'contrib' and child.attrib['contrib-type'] == 'author'):\n # the first child is the name tag\n try:\n name = child[0].find('given-names').text + ' ' + child[0].find('surname').text\n except:\n return((-1,))\n if ('corresp' in child.attrib): # and child.attrib['corresp'] == 'yes'):\n # if it a corresponding author\n # check to see if there is email field\n if (len(child) > 2 and child[1].find('email') != None):\n data = (name, child[1].find('email').text)\n cAuthors.append(data)\n #else post-process this case: case(1)\n else:\n data = (name, 'null')\n cAuthors.append(data)\n case1 = True\n else: \n # handle EMBO style xml \n xrefList = findInSubtree(child, 'xref')\n if (len(xrefList) > 0):\n for xref in xrefList:\n if ('ref-type' in xref.attrib and xref.attrib['ref-type'] == 'corresp'):\n # this is an corresponding author\n data = (name, '')\n cAuthors.append(data)\n case1 = True\n if (case1 == False):\n otherAuthors.append(name) \n else:\n # if not a corresponding author\n otherAuthors.append(name)\n\n # not done yet, some corresponding author information are embedded in author-notes\n if (case1 and 'authorNotes' in locals()):\n i = 0\n # corresponding author identified but no 
email found\n for child in authorNotes:\n if (child.tag == 'corresp'):\n for grandchild in child:\n if (grandchild.tag == 'email'):\n if (i == len(cAuthors)): break\t\n cAuthors[i] = (cAuthors[i][0], grandchild.text)\n i = i + 1\n elif ('authorNotes' in locals()):\n # the linking information is embedded entirely in the text\n text = etree.tostring(authorNotes).strip().decode('utf-8')\n emailElements = findInSubtree(authorNotes, 'email')\n for name in otherAuthors:\n j = 0\n if (text.find(name) != -1 and j < len(emailElements)):\n data = (name, emailElements[j].text)\n cAuthors.append(data)\n otherAuthors.remove(name)\n j = j + 1\n\n # sanity check here, reject anything that may corrupt the database\n if ('pmcId' in locals() and 'publicationDate' in locals()):\n try:\n print(pmcId, otherAuthors, cAuthors, publicationDate)\n except:\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n else:\n return((-1,))",
"def getElementProperties():",
"def read_xml(self):\n connection = urlopen(self.url)\n in_xml = connection.read()\n state = ElementTree.fromstring(in_xml)\n records = []\n record = []\n\n # Specific to CHP\n # TODO(David) Nested for loops are bad. Change this to be more\n # efficient, possibly use generators.\n for center in state:\n rec_center = center.attrib['ID']\n\n for dispatch in center:\n rec_dispatch = dispatch.attrib['ID']\n\n for log in dispatch:\n record = [rec_center, rec_dispatch]\n\n record.append(log.attrib['ID'])\n\n log_time = log.find('LogTime').text.strip('\"')\n log_type = log.find('LogType').text.strip('\"')\n location = log.find('Location').text.strip('\"')\n loc_desc = log.find('LocationDesc').text.strip('\"')\n area = log.find('Area').text.strip('\"')\n\n record.append(log_time)\n record.append(log_type)\n record.append(location)\n record.append(loc_desc)\n record.append(area)\n\n latlon = log.find('LATLON').text.strip('\"')\n\n (lat, lon) = latlon.split(':')\n lat = str(lat[:2]) + '.' + str(lat[2:])\n lon = '-' + str(lon[:3]) + '.' + str(lon[3:])\n\n record.append(lat)\n record.append(lon)\n\n records.append(record)\n\n self.records = records",
"def parsexml(self):\n raise NotImplementedError",
"def parse_xmls_article_ids(self):\n ids_list_d = []\n for x in self.xmls:\n ids_list_d.append(self.parse_one_xml_article_id(x))\n self.list_of_ids_dict = ids_list_d",
"def parse(self):\n result = {}\n if self.detail_statu:\n sel = Selector(text=self.driver.page_source)\n\n fact_table = sel.xpath(\n '//div[@class=\"facts-table\"]//text()').extract()\n result['facts'] = [list(i)\n for i in zip(fact_table[:: 2],\n fact_table[1:: 2])]\n\n tax_table = sel.xpath(\n '//div[@class=\"tax-values\"]//text()').extract()\n result['taxs'] = [list(i)\n for i in zip(tax_table[:: 2],\n tax_table[1:: 2])]\n\n listing_detail = sel.xpath(\n '//div[@class=\"amenities-container\"]//text()').extract()\n result['detail'] = listing_detail\n result['page_source'] = self.driver.page_source\n self.detail_statu = False\n else:\n self.log.warning(\n '---- Detail page url out of reach, use .search() first to get the detail page')\n return result",
"def _from_origin_to_dict(self):\n try:\n for elem in self._xml_tree.getchildren():\n if elem.tag == \"info\":\n for subelem in elem.xpath(\"//Metadata/General/Metas/Titulo\"):\n self._translated_dict[\"root\"][\"title\"] = subelem.text\n return self\n\n except Exception as e:\n logger.error(\"XML Parse Error. %s\" % repr(e))",
"def get_info_from_collections(self, element_soups):\n collections = []\n # Loop through each soup, make CollectionElement, store in collections\n for element_soup in element_soups:\n collections.append(CollectionElement(element_soup))\n # Return list of CollectionElements\n return collections",
"def extract_summary(self):\n metadata = {}\n\n ## document Id\n documentId = self.tree.find(\"./id\")\n documentId = documentId.attrib['root'] if documentId is not None and \"root\" in documentId.attrib else \"\"\n metadata[\"documentId\"] = documentId\n\n ## setId\n setid = self.tree.find(\"./setId\")\n setid = setid.attrib['root'] if setid is not None and \"root\" in setid.attrib else \"\"\n metadata[\"setId\"] = setid\n\n ## version number\n splversion = self.tree.find(\"./versionNumber\")\n versionNumber = \"\"\n if splversion is not None:\n if \"value\" in splversion.attrib:\n versionNumber = splversion.attrib[\"value\"]\n metadata[\"versionNumber\"] = versionNumber\n\n ## product type \n code = self.tree.find(\"./code\")\n check_if_attrib_exists = lambda x, key: x[key] if key in x else ''\n product_type = check_if_attrib_exists(code.attrib, \"displayName\")\n metadata[\"productType\"] = product_type\n\n ## title\n title_text = self.tree_et.xpath(\"./title//text()\")\n title = (\" \".join([self.strip_newline_tab(t) for t in title_text]) if len(title_text) > 0 else \"\")\n metadata[\"title\"] = title\n\n ## manufacturer\n manufacturer = self.tree.find(\"./author//representedOrganization/name\")\n if manufacturer != None and manufacturer.text != None:\n manufacturer = self.strip_newline_tab(manufacturer.text)\n else:\n manufacturer = \"\"\n metadata[\"manufacturer\"] = manufacturer\n\n ## effectivetime\n effectiveTime = self.tree_et.xpath(\"./effectiveTime/@value\")\n effectiveTime = self.__normalize_date(effectiveTime)\n\n metadata[\"effectiveTime\"] = effectiveTime\n metadata[\"publishedDate\"] = effectiveTime\n\n ## From manufacturedProduct section\n brand_name = self.tree_et.xpath(\".//manufacturedProduct//name\")\n brand_name = self.strip_newline_tab(brand_name[0].text) if len(brand_name) > 0 else \"\"\n metadata[\"drugName\"] = brand_name\n\n route = self.tree_et.xpath(\".//manufacturedProduct//formCode/@code\")\n route = self.strip_newline_tab(route[0]) if len(route) > 0 else \"\"\n metadata[\"routeOfAdministration\"] = route\n\n product_ndc = self.tree_et.xpath(\".//manufacturedProduct//code/@code\")\n product_ndc = self.strip_newline_tab(product_ndc[0]) if len(product_ndc) > 0 else \"\"\n metadata[\"ndcCode\"] = product_ndc\n\n generic_name = self.tree_et.xpath(\".//manufacturedProduct//asEntityWithGeneric//genericMedicine/name\")\n generic_name = self.strip_newline_tab(generic_name[0].text) if len(generic_name) > 0 else \"\"\n metadata[\"genericName\"] = generic_name\n\n ## dosage form\n dosage_form = self.tree_et.xpath(\".//manufacturedProduct//formCode/@displayName\")\n dosage_form = dosage_form[0] if len(dosage_form) > 0 else \"\"\n metadata[\"dosageForm\"] = dosage_form\n\n # active ingredients\n substance_name = sorted([self.strip_newline_tab(a.text) for a in\n self.tree_et.xpath(\".//.//manufacturedProduct//activeMoiety/activeMoiety/name\")])\n substance_name = \", \".join(set(substance_name))\n metadata[\"substanceName\"] = substance_name\n\n ## inactive ingredients\n inactive_ingredients = sorted([self.strip_newline_tab(inactive.text) for inactive in self.tree_et.xpath(\n \".//manufacturedProduct//inactiveIngredient/inactiveIngredientSubstance/name\")])\n\n if len(inactive_ingredients) == 0:\n inactive_ingredients = \"\"\n else:\n inactive_ingredients = \",\".join(set(inactive_ingredients))\n\n metadata[\"inactiveIngredients\"] = inactive_ingredients\n\n ## other ingredients\n ingredients = sorted([self.strip_newline_tab(ingredient.text) for ingredient in\n 
self.tree_et.xpath(\".//manufacturedProduct//ingredient/ingredientSubstance/name\")])\n\n if len(ingredients) == 0:\n ingredients = \"\"\n else:\n ingredients = \", \".join(set(ingredients))\n metadata[\"ingredients\"] = ingredients\n\n # marketing_category\n marketing_category = self.tree_et.xpath(\".//manufacturedProduct/subjectOf/approval/code/@displayName\")\n marketing_category = self.strip_newline_tab(marketing_category[0]) if len(marketing_category) > 0 else \"\"\n metadata[\"marketingCategory\"] = marketing_category\n\n # consumed in\n consumed_in = self.tree_et.xpath(\n \".//manufacturedProduct//consumedIn/substanceAdministration/routeCode/@displayName\")\n consumed_in = consumed_in[0] if len(consumed_in) > 0 else \"\"\n metadata[\"consumedIn\"] = consumed_in\n\n # revision date\n marketing_date = self.tree_et.xpath(\".//manufacturedProduct//marketingAct/effectiveTime/low/@value\")\n marketing_date = self.__normalize_date(marketing_date)\n metadata[\"marketingDate\"] = marketing_date\n\n return metadata",
"def _get_geoms(self, root, _parent=None):\n # Initialize return array\n geom_pairs = []\n # If the parent exists and this is a geom element, we add this current (parent, element) combo to the output\n if _parent is not None and root.tag == \"geom\":\n geom_pairs.append((_parent, root))\n # Loop through all children elements recursively and add to pairs\n for child in root:\n geom_pairs += self._get_geoms(child, _parent=root)\n # Return all found pairs\n return geom_pairs",
"def parse_journal_article_record(root) -> dict:\n\n # print(\"Root\", root)\n # pmid = root.find(\"PMID\").text\n # print(\"PMID\", pmid)\n # quit()\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//ArticleTitle/text()\")), \"\")\n\n # TODO https:.//stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element\n atext = next(iter(root.xpath(\".//Abstract/AbstractText/text()\")), \"\")\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(doc[\"pmid\"], pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n doc[\"journal_title\"] = next(iter(root.xpath(\".//Journal/Title/text()\")), \"\")\n doc[\"joural_iso_title\"] = next(iter(root.xpath(\".//Journal/ISOAbbreviation/text()\")), \"\")\n doc[\"doi\"] = next(iter(root.xpath('.//ArticleId[@IdType=\"doi\"]/text()')), None)\n\n doc[\"compounds\"] = []\n for chem in root.xpath(\".//ChemicalList/Chemical/NameOfSubstance\"):\n chem_id = chem.get(\"UI\")\n doc[\"compounds\"].append({\"id\": f\"MESH:{chem_id}\", \"name\": chem.text})\n\n compounds = [cmpd[\"id\"] for cmpd in doc[\"compounds\"]]\n doc[\"mesh\"] = []\n for mesh in root.xpath(\".//MeshHeading/DescriptorName\"):\n mesh_id = f\"MESH:{mesh.get('UI')}\"\n if mesh_id in compounds:\n continue\n doc[\"mesh\"].append({\"id\": mesh_id, \"name\": mesh.text})\n\n return doc",
"def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]",
"def _process_article(el):\n assert len(el) == 1 # We only expect one article record\n el = el[0]\n\n all_tags = [c.tag for c in el.getchildren()]\n title = el.xpath('ArticleTitle')[0].text\n publication_types = [\n pt.text for pt in el.xpath('PublicationTypeList/PublicationType')]\n if all([pt not in DESIRED_PUBLICATION_TYPES for pt in publication_types]\n ) or \\\n any([pt in EXCLUDED_PUBLICATION_TYPES for pt in publication_types]\n ):\n raise PublicationTypeException\n # Only keep the publication types we're interested in\n publication_types = set(\n publication_types).intersection(DESIRED_PUBLICATION_TYPES)\n abstract = el.xpath(\n 'Abstract/AbstractText')[0].text if 'Abstract' in all_tags else None\n return {'title': title,\n 'abstract': abstract,\n 'publication_types': publication_types}",
"def items(self):\r\n return self.elements.values()",
"def return_xml_dict(self):\n\ttry:\n\t self.Correct_MultiRoot_XML()\n\t self.subtitle_dict = sorted(self.xmltodict(self.XML_String)['subtitle'],key=itemgetter('imdb','cd'))\n\t #self.subtitle_dict = self.xmltodict(self.XML_String)['subtitle']\n\t print \"XML subtitle list downloaded and converted to dict\"\n\t return True\n\texcept:\n\t print \"XML subtitle list not downloaded or converterd.\"\n\t return False",
"def parse(self):\n return []",
"def parse_one_xml(xml_file, fields=None):\r\n tree = etree.iterparse(xml_file)\r\n \r\n d = dict.fromkeys(fields)\r\n for event, elem in tree:\r\n if elem.tag == 'SAMPLE_ATTRIBUTE':\r\n for f in fields:\r\n if elem.getchildren()[0].text == f:\r\n d[f] = elem.getchildren()[1].text \r\n return d",
"def results(self) -> Dict[str, Any]:\n return self.nodes",
"def get_attrs(post_content):\n for attribute in post_content.find_all(\"p\", {\"class\": \"attrgroup\"}):\n for attr in attribute.find_all(\"span\"):\n attr_text = attr.text.strip()\n if attr_text:\n yield attr_text.lower()",
"def get_element_pdos(dos,el):\n \n el_dos = {}\n for site, atom_dos in dos.pdos.items(): \n ## .items() return (key,value) pairs\n if site.specie == Element(el):\n for orb, pdos in atom_dos.items():\n if orb not in el_dos:\n el_dos[orb] = pdos\n else:\n el_dos[orb] = add_densities([el_dos[orb], pdos])\n\n return {orb: Dos(dos.efermi, dos.energies, densities)\n for orb, densities in el_dos.items()}",
"def getReferenceDetails(soup):\n refDict = {}\n refs = soup.find_all('edmx:reference')\n for ref in refs:\n includes = ref.find_all('edmx:include')\n for item in includes:\n if item.get('namespace') is None or ref.get('uri') is None:\n rsvLogger.error(\"Reference incorrect for: \", item)\n continue\n if item.get('alias') is not None:\n refDict[item['alias']] = (item['namespace'], ref['uri'])\n else:\n refDict[item['namespace']] = (item['namespace'], ref['uri'])\n refDict[item['namespace'].split('.')[0]] = (item['namespace'], ref['uri'])\n return refDict",
"def get_attributes_from_child(child):\n return [{'element': child,\n 'attribute': x.attrib,\n 'tag': x.tag,\n 'keys': x.keys()} for x in child]",
"def parse_mapping_page(self, id, body):\n info = {}\n info['original'] = self.__re_search(body, *self.regx['original'])\n info['save'] = self.__re_search(body, *self.regx['save'])\n info['price'] = self.__re_search(body, *self.regx['price'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n return info",
"def process_subtags(element, node):\n \n for tag in element.iter(\"tag\"):\n tag_key = tag.attrib['k']\n tag_val = tag.attrib['v']\n \n # Check for problem characters\n if problemchars.match(tag_key):\n continue\n \n # fix tag 'v' attribute of streetname and postcode\n elif tag_key.startswith(\"addr:\"):\n if not \"address\" in node.keys():\n node[\"address\"] = {}\n addr_key = tag.attrib['k'][len(\"addr:\") : ]\n if lower_colon.match(addr_key):\n continue\n else:\n if tag.attrib['k'] == \"addr:street\":\n fixed_v, change = correct_street_type(tag_val)\n elif tag.attrib['k'] == \"addr:postcode\":\n fixed_v, change = correct_postcode(tag.attrib['v'])\n else:\n fixed_v = tag_val\n if fixed_v != None:\n node[\"address\"][addr_key] = fixed_v\n \n # fix fax and phone number\n elif tag_key == \"fax\" or tag_key == \"phone\":\n fixed_v, chang = correct_number(tag_val)\n node[tag_key] = fixed_v\n \n #fix multiple tag_key confusing. These two tag_key in the list have same meaing, \n #so just keep the latter one in the list and change the former to the latter\n elif tag_key in [ u'应急避难场所疏散人数万人',u'应急避难场所疏散人口万人']:\n node[u'应急避难场所疏散人口万人'] = tag_val\n \n # '疏散人数' and '疏散人数(万)' are two similar tag_key. Inthis way below, we change '疏散人数' to '疏散人数(万)'\n # by doing some math.\n elif tag_key == u'疏散人数':\n node[u'疏散人数(万)'] = str(round(float(tag_val.split()[0].replace(',',''))/10000,2))\n elif tag_val != None:\n node[tag_key] = tag_val\n \n return node",
"def parse(fname):\n\n tree = pbsXml.parse(fname)\n\n if not tree:\n tools.error('failed to parse pbsdump xml file ' + fname)\n return 0\n\n root = tree.getroot()\n\n nodes = dict() # Hold list of nodes\n\n # Iterate on all Node items\n for child in root.findall('Node'):\n # Get node name\n name = child.find('name').text\n\n # Build new entry for the given node\n nodes[name] = dict()\n node = nodes[name]\n matches = GET_NODE_ID.match(name)\n node[id] = int(matches.group(1))\n\n # Collect data\n node['name'] = name\n node['np'] = int(child.find('np').text)\n node['state'] = child.find('state').text\n node['power_state'] = child.find('power_state').text\n data = child.find('jobs')\n if data is not None:\n node['jobs'] = data.text\n else:\n node['jobs'] = None\n\n node['nb_sockets'] = child.find('total_sockets').text\n node['nb_numa_nodes'] = child.find('total_numa_nodes').text\n props = child.find('properties').text\n node['properties'] = props.split(',')\n\n # Get the status entries\n node['status'] = dict()\n data = child.find('status')\n if data is None:\n tools.error('Node ' + name + \" has no status entry! Skipped.\")\n continue\n\n status = data.text\n status_list = status.split(',')\n for entry in status_list:\n data = entry.split('=')\n matches = IS_BYTE_SIZE.match(data[1])\n if matches:\n # Convert whatever size in GB\n data[1] = tools.size_convert(matches.group(1), matches.group(2), 'gb')\n\n # Keep the data\n node['status'][data[0]] = data[1]\n\n return nodes"
] |
[
"0.63759995",
"0.620662",
"0.6160831",
"0.6134572",
"0.6060549",
"0.6026878",
"0.60247046",
"0.5937487",
"0.5935178",
"0.5895591",
"0.5833998",
"0.58304346",
"0.5824379",
"0.58219045",
"0.5707903",
"0.5669245",
"0.5667443",
"0.56660455",
"0.5661833",
"0.5661833",
"0.5661279",
"0.562904",
"0.5626109",
"0.5598012",
"0.558992",
"0.558302",
"0.55548936",
"0.5547756",
"0.5523492",
"0.5521544",
"0.5508013",
"0.5482391",
"0.5474641",
"0.5467026",
"0.546035",
"0.5444682",
"0.54395217",
"0.54169565",
"0.5405097",
"0.54004335",
"0.540019",
"0.5395445",
"0.53929245",
"0.5364471",
"0.5357936",
"0.53542644",
"0.5335221",
"0.53296626",
"0.5327882",
"0.53183645",
"0.53169847",
"0.5315984",
"0.53077614",
"0.5271714",
"0.52644044",
"0.5263715",
"0.526105",
"0.5256422",
"0.52552193",
"0.52539897",
"0.5251073",
"0.5248198",
"0.52456754",
"0.5240326",
"0.52402717",
"0.52374476",
"0.52357745",
"0.52187836",
"0.52187425",
"0.52184474",
"0.5215375",
"0.5202723",
"0.5193679",
"0.51877254",
"0.51791537",
"0.51762235",
"0.51756275",
"0.5175373",
"0.5174338",
"0.51733166",
"0.5165284",
"0.5156484",
"0.5148173",
"0.51477426",
"0.51379347",
"0.5136722",
"0.5133677",
"0.5126253",
"0.51259637",
"0.5122264",
"0.511351",
"0.510984",
"0.51052547",
"0.5095813",
"0.5091032",
"0.5089635",
"0.5084069",
"0.5083835",
"0.50760216",
"0.5075776"
] |
0.6353038
|
1
|
(str) > bool Return True if correct mail type
|
def is_mail_types_correct(self, mail_type):
return mail_type in self.mail_types
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def email_type(verifield, required):\n return verifield is None or parseaddr(verifield) != ('','')",
"def is_class_mail_types_correct(self, class_mail_type):\r\n return class_mail_type in self.class_mail_types",
"def get_receive_mail_str(self):\n ret = False\n if self.__mail:\n ret = True\n return ret",
"def verify_mail(self):\n raise NotImplementedError",
"def __validate_email(self,mail):\n if re.match(r\"[\\w\\W]*@+[\\w\\W]*[.]+[\\w]{2,4}\",mail):\n return True\n return False",
"def check_eligible_mimetype(self, ctype, uid):\n self.helper.log_debug(\n 'check_eligible_mimtype: checking content-type %s of msg uid %s' %\n (ctype, uid))\n if ctype == \"application/zip\":\n return True\n elif ctype == \"application/gzip\":\n return True\n elif ctype == \"application/x-gzip\":\n return True\n elif ctype == \"application/octet-stream\":\n # Non-standard mimetype used by Amazon SES dmarc reports\n return True\n elif ctype == \"application-x-gzip\":\n # Non-standard mimetype used by Comcast dmarc reports\n return True\n elif ctype == \"application/x-zip-compressed\":\n # Non-standard mimetype used by Yahoo dmarc reports\n return True\n elif ctype == \"application/xml\":\n return True\n elif ctype == \"text/xml\":\n return True\n else:\n self.helper.log_debug(\n 'check_eligible_mimtype: skipping content-type %s of msg uid %s' %\n (ctype, uid))\n return False",
"def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return t",
"def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')",
"def check_mail(eml):\n return eml[::-1] if eml != '#N/A' else '#N/A'",
"def _is_valid_content_type_format(content_type: str) -> bool:\n return (\n _is_valid_ct(content_type)\n or _is_valid_pt(content_type)\n or _is_valid_set(content_type)\n or _is_valid_list(content_type)\n or _is_valid_dict(content_type)\n or _is_valid_union(content_type)\n or _is_valid_optional(content_type)\n )",
"def test(types, _):\n return 'Date' in types and 'Postal Code' in types",
"def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False",
"def is_email_address(value):\n return _COMPILED[EMAIL].match(value) != None",
"def validate_type(type):\n\n types_upper = [i.upper() for i in officeTypes]\n if type.upper() in types_upper:\n return True\n return False",
"def _validate_content_type(\n content_type: str, content_name: str, performative: str\n) -> Tuple[bool, str]:\n if not _is_valid_content_type_format(content_type):\n return (\n False,\n \"Invalid type for content '{}' of performative '{}'. See documentation for the correct format of specification types.\".format(\n content_name,\n performative,\n ),\n )\n\n return (\n True,\n \"Type of content '{}' of performative '{}' is valid.\".format(\n content_name, performative\n ),\n )",
"def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False",
"def is_valid_content_type(cls, content_type: str) -> bool:\n return content_type in cls.CONTENT_TYPES.value",
"def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)",
"def is_my_type(type_str):\n raise NotImplementedError()",
"def is_valid_email(email):\n return \"@\" in email and \".\" in email",
"def is_voicemail(self):\n return self._is_voicemail",
"def check_email_required(document_text):\n if \"visitor[email]\" in document_text:\n return True\n else:\n return False",
"async def can_send_modmail(self, user: discord.User):\n blocked = await self.config.user(user).get_raw(\"blocked\")\n type_waiting = await self.config.user(user).get_raw(\"type_holding\")\n if blocked:\n raise UserIsBlocked\n if type_waiting:\n raise WaitingForMessageType(\n \"Please choose type of message you wish to send\"\n )\n thread_open = await self.config.user(user).get_raw(\"thread_is_open\")\n current_thread = await self.config.user(user).get_raw(\"current_thread\")\n\n if thread_open:\n return True, current_thread\n else:\n return False, None",
"def is_string(self):\n answer = self._call('is_string')\n return answer.yes",
"def vet_email(email_address):\n ## FIXME: Doesn't warn user!\n if not re.match(r'^([^@\\s]+)@((?:[-a-z0-9]+\\.)+[a-z]{2,})$', email_address):\n return False\n local_part = re.sub(r'^([^@\\s]+)@((?:[-a-z0-9]+\\.)+[a-z]{2,})$', '\\\\1', email_address)\n domain_part = re.sub(r'^([^@\\s]+)@((?:[-a-z0-9]+\\.)+[a-z]{2,})$', '\\\\2', email_address)\n if len(local_part) > 64:\n return False\n if len(domain_part) > 255:\n return False\n return True",
"def is_valid_license_type(self):\n clean = self.license_type.lower().replace('-', ' ')\n return clean not in INVALID_LICENSE_TYPE",
"def isEmail(email):\n if not re.match(r\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\\\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\\\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\", email):\n return(0)\n return(1)",
"def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode",
"def validatePhoneNumberType(self):\n ## Declaring a Flag to control a while loop\n phone_number_type_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_type_ok:\n if self.phone_number_type.lower() in self.valid_phone_number_types:\n phone_number_type_ok = True\n return True\n\n else:\n return False",
"def _check_mimetype(self):\n if self.mimetype in Config.aliases:\n mimetype = Config.aliases[self.mimetype]\n else:\n mimetype = self.mimetype\n expected_extensions = mimetypes.guess_all_extensions(mimetype,\n strict=False)\n if expected_extensions:\n if self.has_extension and self.extension not in expected_extensions:\n # LOG: improve this string\n self.make_dangerous('expected extensions')",
"def IsValidEmail(s):\n return RE_EMAIL_ONLY.match(s)",
"def is_email(address):\n try:\n validate_email(address)\n return True\n except:\n return False",
"def is_text(self):\n return self.value_type in (str, unicode)",
"def is_special_message(self):\n if not self.is_valid():\n return False\n \n # TODO: what if the author is wrong? then these don't match at all!\n for nickname in AUTHOR_TO_NICKNAME[self.author]:\n \n if self.content == f\"{nickname} changed the chat theme.\":\n return True\n \n if self.content == f\"{nickname} joined the video chat.\":\n return True\n \n if self.content == f\"{nickname} joined the call.\":\n return True\n \n if self.content.startswith(f\"{nickname} named the group\"):\n return True\n \n if self.content == f\"{nickname} removed the group name.\":\n return True\n \n if self.content == f\"{nickname} sent a link.\":\n return True\n \n if self.content == f\"{nickname} sent an attachment.\":\n return True\n \n if self.content.startswith(f\"{nickname} set the emoji to\"):\n return True\n \n if self.content == f\"{nickname} changed the group photo.\":\n return True\n \n if is_add_remove_member(self.content, nickname):\n return True\n\n if is_set_nickname(self.content, nickname):\n return True\n \n if is_clear_nickname(self.content, nickname):\n return True\n \n if is_create_group(self.content, nickname):\n return True\n if self.content == f\"{nickname} started a video chat.\":\n return True\n \n if self.content == f\"{nickname} left the group.\":\n return True\n \n if is_poll_message(self.content, nickname):\n return True\n return False",
"def is_valid(self, email=None):\n if not email:\n return False\n\n # RFC 3696\n # In addition to restrictions on syntax, there is a length limit on email addresses.\n # That limit is a maximum of 64 characters (octets) in the \"local part\" (before the \"@\")\n # and a maximum of 255 characters (octets) in the domain part (after the \"@\") for a total\n # length of 320 characters. However, there is a restriction in RFC 2821 on the length of\n # an address in MAIL and RCPT commands of 254 characters. Since addresses that do not fit\n # in those fields are not normally useful, the upper limit on address lengths should\n # normally be considered to be 254.\n\n if len(email) > 254:\n return False\n\n parts = email.split('@')\n if len(parts) > 2 or len(parts[0]) > 64 or len(parts[1]) > 255:\n return False\n\n if not re.match('[a-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~\\-]+(?:\\.[a-z0-9\\!\\#\\$\\%\\&\\'\\*\\+\\/\\=\\?\\^\\_\\`\\{\\|\\}\\~\\-]+)*', email.lower()):\n return False\n # A valid mail exchange server is configured!\n return self.valid_mx(parts[1])",
"def test_if_str(self):\n self.assertTrue(type(self.new.email) is str)\n self.assertTrue(type(self.new.password) is str)\n self.assertTrue(type(self.new.first_name) is str)\n self.assertTrue(type(self.new.last_name) is str)",
"def is_allowed_email(email):\n if email and not is_regex_email(email):\n return \"That is not a valid email.\"\n else:\n return \"\"",
"def validate_email(email:str) -> bool:\r\n return email.count(\"@\") == 1 and email.count(\".\") >= 1 and len(email) > 6",
"def is_valid_case_type(case_type):\n return bool(_case_type_regex.match(case_type or ''))",
"def test_sendmail(self):\n assert self.rc_conf.has_key('sendmail_enable')\n assert self.rc_conf['sendmail_enable'] == '\"NONE\"'",
"def typeValidator(self, food_type):\n if type(food_type) != str:\n API.abort(400, error_messages[16]['int_type'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_ ]+$)\", food_type) or food_type.isspace():\n API.abort(\n 400, error_messages[17]['wrong_format_ty'])\n\n return True",
"def isValidType(self, fsn, fsn_type):\n if ((OINKM.checkIfFSN(fsn)) and (\"SEO\" in fsn_type)) or (not(OINKM.checkIfFSN(fsn)) and (\"SEO\" not in fsn_type)):\n #If the value in the fsn field is an FSN and the description type is an SEO type, then it could be invalid.\n #If the value in the fsn field is not an FSN and the description type is not an SEO type, then it could be invalid.\n if \"SEO\" in fsn_type:\n question = \"You seem to be writing an FSN article but the description type appears to be an SEO. Are you sure you want to submit that?\"\n else:\n question = \"You seem to be writing about something that's not an FSN. Are you sure you want to submit that?\"\n change_type = QtGui.QMessageBox.question(\n self,\n \"Possible Description Type Mismatch\",\n question,\n QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, \n QtGui.QMessageBox.No\n )\n if change_type is not None:\n if change_type == QtGui.QMessageBox.Yes:\n is_valid = True\n else:\n is_valid = False\n else:\n is_valid = False\n else:\n #If the value in the FSN field is an FSN and the type is not an SEO type.\n #if the value is not an FSN and the type is one of the SEO types.\n is_valid = True\n return is_valid",
"def email_checker(email):\n regex = '^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$'\n if re.search(regex, email):\n return True\n else:\n return False",
"def get_email_content_from_type(self, type):\n if type is 1:\n return strings.CIRCUIT_CREATED_EMAIL_NOTIFICATION\n # case circuit is favorited\n if type is 2:\n # parse the route from info textfield\n route = self.info[self.info.find('circuit'):]\n route = route[:route.find('\\n')]\n route = route[route.find(':')+2:]\n try:\n circuit = Circuit.objects.get(pk=route)\n circuit_name = circuit.name\n except Circuit.DoesNotExist:\n circuit_name = 'Route :)'\n # parse the user from info textfield\n us = self.info[self.info.find('user')+7:]\n us = us[:us.find(',')]\n try:\n user = User.objects.get(pk=us)\n user = user.get_full_name()\n except User.DoesNotExist:\n user = 'Some Worldrat user :)'\n return strings.CIRCUIT_FAVORITED_EMAIL_NOTIFICATION % {\n 'route':circuit_name, 'user': user,\n }\n\n if type is 3:\n # parse the route from info textfield\n route = self.info[self.info.find('original_circuit')+19:]\n route = route[:route.find(',')]\n try:\n circuit = Circuit.objects.get(pk=route)\n circuit_name = circuit.name\n except Circuit.DoesNotExist:\n circuit_name = 'Route :)'\n # parse the user from info textfield\n us = self.info[self.info.find('user')+7:]\n us = us[:us.find(',')]\n try:\n user = User.objects.get(pk=us)\n user = user.get_full_name()\n except User.DoesNotExist:\n user = 'Some Worldrat user :)'\n return strings.CIRCUIT_REMIXED_EMAIL_NOTIFICATION % {\n 'route':circuit_name, 'user': user, \n }\n if type is 4:\n return strings.CIRCUIT_UPDATED_EMAIL_NOTIFICATION\n if type is 5:\n return strings.USER_FOLLOWED_EMAIL_NOTIFICATION\n if type is 6:\n return strings.CONTENT_SHARED_EMAIL_NOTIFICATION",
"def stringable(self):\n return True",
"def is_text( self ):\n return self.get_main_type() == 'text'",
"def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True",
"def check_proc_type(image_proc_type):\n\n if isinstance(image_proc_type, str):\n return True\n else:\n logging.warning('Image processing type is not a string')\n print('Please choose only one processing technique.')\n raise ValueError('Please choose only one processing technique.')",
"def is_secondary_email_feature_enabled():\n return waffle.switch_is_active(ENABLE_SECONDARY_EMAIL_FEATURE_SWITCH)",
"def ISTEXT(value):\n return isinstance(value, (basestring, AltText))",
"def is_valid_type(self, question_type):\n\t\treturn question_type in self.valid_types",
"def _IsValidPrimaryOwnerEmail(owner_tag_text):\n if '-' in owner_tag_text: # Check whether it's a team email address.\n return False\n\n return (owner_tag_text.endswith('@chromium.org')\n or owner_tag_text.endswith('@google.com'))",
"def checkMailAddress(obj, someAddr):\n # #5353 use checkEmailAddress from CMFDefault utils instead of\n # validateSingleEmailAddress from plone_utils as the plone email validator \n # is better at detecting invalid addreses\n try:\n checkEmailAddress(someAddr)\n except EmailAddressInvalid:\n return False\n return True",
"def isString(x):\n if type(x) == str:\n return True\n else:\n return False",
"def check_type(self):\n return True",
"def email_validator(email):\n if len(email) > 6:\n if re.match(REGEX_EXP, email) != None:\n return True\n return False",
"def is_type(self, ent_type):\n # type: (str) -> bool\n # its always an entity ...\n if ent_type.lower() in ('entity', self.settings['_type'].lower()):\n return True\n else:\n return False",
"def validate_email(self, email):\n return models.Email.normalize_address(email)",
"def test_compose_email2_good(self):\n pass",
"def _email_is_valid(email):\n return VALID_EMAIL_REGEXP.match(email) is not None",
"def is_valid_email(self, email):\n rex = \"^[\\w]+[\\d]?@[\\w]+\\.[\\w]+$\"\n return re.match(rex, email)",
"def is_string(value):\n return isinstance(value, (str, bytes))",
"def verify(self):\n data = [\"rfc\", \"tel\", \"email\", \"name\", \"use\"]\n state = False\n for item in data:\n if getattr(self, item + \"Input\").text() != \"\":\n state = True\n else:\n return False\n return state",
"def validate_email_address(self, address):\n if self._re_email.search(address.lower()) is None:\n return False\n return True",
"def convert(self,message):\n \n content_type = message.get('content',{}).get('@type','')\n if content_type in self.supported:\n result = getattr(self.tconv, content_type)(message)\n else:\n return False\n \n return result",
"def validate_typeID(self, type_ID):\n if type(type_ID) == str:\n for letter in type_ID:\n if not letter.isalpha() and not letter.isdigit():\n return False\n return True\n return False",
"def username(provider, username):\n\n if provider == 'alditalk':\n if username.isdigit(): # only mobile number\n return True\n else:\n return False\n elif provider == 'netzclub': # mobile number and email\n if ((username.isdigit()) or (\"@\" in username)):\n return True\n else:\n return False\n elif provider == 'congstar':\n return True",
"def streettype(self):\n if self.index >= self.length:\n return False\n \n self._typ, n = self.parse_streettype()\n if self._typ is not None:\n self.idx_typ = self.index\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == '.':\n self.index += 1\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"TYP\", self._typ, self.idx_typ)\n self.isaddr = True\n return True\n return False",
"def test_is_valid_email(self):\n self.assertTrue(is_valid_email('abc@example.com'))",
"def test_string(self,address):\n t=type(address) == str\n assert t, \"not a string\"",
"def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)",
"def is_valid_email_address (email):\n return valid_email.search(email)",
"def has_validated_email(self):\n return self.receipt_diploma_uploaded_at is not None",
"def match_mime_type(self, src: str):\n for key in self.keys():\n if Pattern.test(key, src):\n return self[key]\n return \"text/plain\"",
"def email_protection_flag(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"email_protection_flag\")",
"def email_protection_flag(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"email_protection_flag\")",
"def _match_entry_type_string(code_entry, string_list):\n entry_type = re.match(r\"<(AST.*):.*\", code_entry.get('type')).group(1)\n return bool(entry_type in string_list)",
"def _type_validator(self, type=None):\n if type not in ['agents', 'users', 'groups']:\n type = 'users'\n return type",
"def _is_valid_pt(content_type: str) -> bool:\n content_type = content_type.strip()\n return content_type in SPECIFICATION_PRIMITIVE_TYPES",
"def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )",
"def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False",
"def is_valid_email(email):\n subdomain = HunterService._get_domain_for_email(email)\n try:\n result = hunter.domain_search(subdomain)\n except HTTPError as e:\n logging.info('Skipping hunter.io services. REASON: %s', str(e))\n\n return True\n\n return result['webmail'] or bool(result['emails'])",
"def is_valid_type(type):\n return type in type_to_adapter",
"def test_compose_email_good(self): \n pass",
"def test_compose_email_somebad(self):\n pass",
"def test_email():\n assert is_email(None) is None\n assert is_email('test@example.com') is None\n assert is_email('other')",
"def do_type(self, str_arg):\n try:\n self.adbc.type(validateString(str_arg))\n except Exception, e:\n printLog(self.threadName + 'TYPE FAILED: %s' % e.message)\n self.resultFlag = False\n finally:\n return self.resultFlag",
"def is_email_valid(e_mail):\n pattern = re.compile(r\"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\")\n result = False\n if pattern.match(e_mail):\n result = True\n return result",
"def email_protection_flag(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"email_protection_flag\")",
"def email_body_verify_email_address(url, code): #bug267\n\tmsg = \"\"\n\treturn msg",
"def IsType(self, rule_type_name):\n return rule_type_name == 'log_url'",
"def validate(sender_id, message_text, attachment_type, postback, quick_reply):\n\n if message_text:\n return True, dict()\n else:\n return False, dict(message_text='Want to add some tags?')",
"def _validate_type(self, key, type_):\n if type_ is None:\n type_ = \"\"\n \n if not isinstance(type_, (str, unicode)):\n raise TypeError(\"FileLink.type should be a str or unicode, \"\n \"not %s\" % type_.__class__.__name__)\n \n return type_",
"def is_email_mask(self, names: pd.Series) -> pd.Series:\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message=\"This pattern has match groups. To actually get the group\")\n return names.str.contains(self.email_extract_re)",
"def __bool__(self):\n return _libsbml.string___bool__(self)",
"def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)",
"def valid_email(line):\n email = line.o_email\n is_valid = validate_email(email)\n if not is_valid:\n rule = 'Email validation'\n new_row = Error(e_name=rule, order_key=line.primary_key)\n line.errors.append(new_row)\n return False\n return True",
"def is_valid_email(email):\n if re.search(EMAIL_REGEX, email):\n return True\n else:\n return False",
"def is_string(value):\n return isinstance(value, basestring)",
"def DataIsString(self):\n return self.data_type in (definitions.REG_SZ, definitions.REG_EXPAND_SZ)"
] |
[
"0.73987883",
"0.6967432",
"0.67043835",
"0.62827927",
"0.62815034",
"0.61875695",
"0.60421157",
"0.5943893",
"0.590512",
"0.5892637",
"0.587241",
"0.5855261",
"0.5816171",
"0.5768737",
"0.56882554",
"0.56593686",
"0.56416327",
"0.56387013",
"0.5634344",
"0.55975133",
"0.55863184",
"0.558388",
"0.5576697",
"0.5551998",
"0.555003",
"0.55410945",
"0.55377686",
"0.55373394",
"0.54792535",
"0.5475107",
"0.5468073",
"0.54571044",
"0.545335",
"0.5440356",
"0.54397225",
"0.5435383",
"0.54180926",
"0.54076415",
"0.5404245",
"0.5401953",
"0.5398435",
"0.53852946",
"0.53709394",
"0.5369081",
"0.53613037",
"0.5359185",
"0.53401315",
"0.5332887",
"0.53284794",
"0.5325486",
"0.5318757",
"0.53138113",
"0.52980304",
"0.52973455",
"0.529397",
"0.5288592",
"0.52877015",
"0.5286879",
"0.5286549",
"0.528355",
"0.52776223",
"0.5275752",
"0.52739",
"0.5271276",
"0.5259117",
"0.5250425",
"0.5248858",
"0.5245959",
"0.5240958",
"0.5233422",
"0.523212",
"0.52287227",
"0.5226628",
"0.52251124",
"0.521856",
"0.521856",
"0.52168316",
"0.52092904",
"0.5205641",
"0.5203379",
"0.5201373",
"0.51989686",
"0.5198799",
"0.51962566",
"0.5196094",
"0.5193948",
"0.51925397",
"0.51917523",
"0.51915926",
"0.5175735",
"0.51740396",
"0.5169989",
"0.51648456",
"0.5161553",
"0.516039",
"0.51555794",
"0.5150771",
"0.51492393",
"0.5146519",
"0.5146083"
] |
0.7832249
|
0
|
(class xml.etree.ElementTree.Element) > list Return dictionary with all Postage information
|
def get_postage_from_response(self, xml_response):
services = xml_response.find("Package").findall("Service")
postages_list = []
if services:
for postages in services:
postages_list.append(postages.find("Postage").text)
return postages_list
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def xml_to_dict(self):\n medicine_node = ['medicines', 'dose', 'unit', 'unit_price', 'goods_num', 'dose_that',\n 'remark', 'm_usage', 'goods_norms', 'goods_orgin', 'MedPerDos', 'MedPerDay']\n dict_data = {}\n n = 0\n xq_childs = 0\n for child in self.root.iter():\n # print(child.tag)\n # print(child.text)\n if child.tag not in medicine_node:\n if child.tag == 'xq':\n xq_childs = child.__len__() # __len__:返回元素大小,元素的大小为元素的子元素数量\n dict_data[child.tag] = child.text\n else:\n if n < xq_childs:\n dict_data[child.tag] = [child.text]\n n += 1\n else:\n dict_data[child.tag].append(child.text)\n return dict_data",
"def get_postage_from_response(self, xml_response):\r\n postages = xml_response.find(\"Package\").findall(\"Postage\")\r\n postages_list = []\r\n\r\n if postages:\r\n for postage in postages:\r\n postages_list.append(self.get_response_information(postage))\r\n\r\n return postages_list",
"def _construct_data_xml(self, xml_file_list):\n award_dict = {}\n award_list = []\n for xml_file in xml_file_list:\n xml_file.seek(0)\n tree = ET.parse(xml_file)\n root = tree.getroot()\n\n for response in root:\n temp_dict = {}\n for award in response:\n if award.tag == 'entry':\n continue\n try:\n # temp_dict[award.tag].append(award.text)\n temp_dict[award.tag] = award.text\n except KeyError:\n print(\"KeyError\")\n # temp_dict[award.tag] = [award.text]\n\n # if 'entry' in temp_dict.keys():\n # del temp_dict['entry']\n if len(temp_dict) > 0:\n award_list.append(temp_dict)\n\n return award_list",
"def extract_xml(self, xml_list):\n craziness = dict()\n for i in range(len(xml_list)):\n if xml_list[i]['@type'] == 'EMBL':\n craziness['EMBL']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'RefSeq':\n craziness['RefSeq']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'Ensembl':\n craziness['Ensembl']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'OrthoDB':\n craziness['OrthoDB']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'PROSITE':\n craziness['PROSITE']=(xml_list[i]['@id'])\n elif xml_list[i]['@type'] == 'Pfam':\n craziness['Pfam']=(xml_list[i]['@id'])\n return craziness",
"def parse_element(elem):\n return_dict = {}\n for e in elem:\n return_dict[e.tag] = e.text\n return return_dict",
"def dict(self):\n return xmltodict.parse(str(self))",
"def _get_information(self):\n pros_cons = []\n pros_cons_dict = {}\n\n for i, div in enumerate(self._tab.find_all(\"div\")):\n for p in div.find_all(\"p\"):\n pro_con = p.get_text(strip=True)\n pros_cons.append(pro_con)\n pros_cons_dict.update({self._keys_dict[i]: pros_cons})\n pros_cons = []\n\n return pros_cons_dict",
"def xml_children_as_dict(node):\n return dict((e.tag, e.text) for e in node)",
"def get_attributes(self):\n\t\treturn dict(list(self.__element.items()))",
"def _parse_xml(self):\n self.properties = {}\n pages = self.root.findall('page')\n self.pages = {} \n\n for page_num, page in enumerate(pages): \n\n _, _ , width, height = page.attrib[\"bbox\"].split(\",\")\n width, height = float(width), float(height)\n \n page_object = {\"page\": page_num + 1 , \"width\": width, \"height\": height} \n lines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n print(\"{} Number of Lines in Page {}\".format(len(lines), page_num))\n \n self.bbox = {'x1': [] , 'y1':[], 'x2':[], 'y2':[]}\n textlines = self.root.findall('page[@id=\\'{}\\']/textbox/textline'.format(page_num+1)) \n textlines = sorted(textlines, key= lambda x: -float(x.attrib['bbox'].split(',')[3]))\n \n \n line_objects = []\n for idx, item in enumerate(textlines):\n item_props = self._extract_textline_properties(item)\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3])\n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n\n line_objects.append(item_props)\n page_object[\"lines\"] = line_objects\n \n \n others = [] \n# for key in [\"rect\", \"figure\", \"layout/textgroup\", \"curve\"]: \n for key in [\"curve\", \"rect\", \"figure\"]: \n other_objs = self.root.findall('page[@id=\\'{}\\']/{}'.format(page_num+1, key)) \n for idx, item in enumerate(other_objs):\n \n item_props = {\"type\": key}\n# print(key, ET.tostring(item))\n bbox = item.attrib['bbox'].split(',')\n item_props[\"x0\"] = Decimal(bbox[0])\n item_props[\"x1\"] = Decimal(bbox[2])\n item_props[\"y0\"] = Decimal(bbox[1])\n item_props[\"y1\"] = Decimal(bbox[3]) \n item_props[\"top\"] = Decimal(height - float(bbox[3]))\n item_props[\"bottom\"] = Decimal(height - float(bbox[1]))\n others.append(item_props)\n \n page_object[\"others\"] = others\n page = Page(page_object)\n page_object[\"para\"] = page.para\n page_object[\"plines\"] = page.lines\n page_object[\"bigbox\"] = page.bigbox\n page_object[\"components\"] = page.components\n\n self.pages[page_num+1] = page_object",
"def _get_information(self):\n reviews = self._tab.find_all(\"div\", class_=\"review\", attrs={'itemprop': 'review'})\n return [(self._get_review(elem), self._get_published_date(elem)) for elem in reviews]",
"def xmlpost_to_dict(post):\n\n tree = ET.parse(post)\n root = tree.getroot()\n msg = root.find('message')\n\n post_data = {}\n\n board_id = msg.find('board_id')\n post_data['board_id'] = int(board_id.text)\n\n root_post = msg.find('root').attrib['href']\n post_data['root_post'] = root_post.split('/')[-1]\n\n kudos = msg.find('kudos')\n count = kudos.find('count')\n post_data['kudos_count'] = int(count.text)\n\n edit_author_id = msg.find('last_edit_author').attrib['href']\n post_data['edit_author_id'] = int(edit_author_id.split('/')[-1])\n\n post_time = msg.find('post_time')\n post_data['post_time'] = post_time.text\n\n last_edit_time = msg.find('last_edit_time')\n post_data['last_edit_time'] = last_edit_time.text\n\n body = msg.find('body')\n post_data['body'] = body.text\n\n thread = msg.find('thread').attrib['href']\n post_data['thread'] = int(thread.split('/')[-1])\n\n board = msg.find('board').attrib['href']\n post_data['board'] = board.split('/')[-1]\n\n try:\n parent_post = msg.find('parent').attrib['href']\n post_data['parent_post'] = int(parent_post.split('/')[-1])\n except KeyError:\n post_data['parent_post'] = None\n\n views = msg.find('views')\n post_data['views'] = int(views.find('count').text)\n\n subject = msg.find('subject')\n post_data['subject'] = subject.text\n\n post_id = msg.find('id')\n post_data['post_id'] = int(post_id.text)\n\n author_id = msg.find('author').attrib['href']\n post_data['author_id'] = int(author_id.split('/')[-1])\n\n return post_data",
"def to_dict(xml):\n children = xml.getchildren()\n if not children:\n return xml.text\n else:\n out = {}\n for node in xml.getchildren():\n if node.tag in out:\n if not isinstance(out[node.tag], list):\n out[node.tag] = [out[node.tag]]\n out[node.tag].append(to_dict(node))\n else:\n out[node.tag] = to_dict(node)\n return out",
"def _get_elements(self):\n address_elements = {\n 'organisation': \"{}{}\".format(\n self.organisation if self.organisation else \"\",\n '\\n' + self.department if self.department else \"\",\n ),\n 'sub-building name': self.sub_building_name,\n 'building name': self.building_name,\n 'building number': self.building_number,\n 'PO box': self.po_box_num,\n 'dependent thoroughfare': self.dependent_thoroughfare,\n 'thoroughfare': self.thoroughfare,\n 'double dependent locality': self.double_dependent_locality,\n 'dependent locality': self.dependent_locality,\n 'town': self.town,\n 'postcode': \"{} {}\".format(\n self.postcode[:-3], \n self.postcode[-3:]\n ),\n 'concatenation indicator': self.concatenation_indicator\n }\n return address_elements",
"def get_attachments(xml):\r\n items = get_items(xml)\r\n names = {}\r\n attachments = []\r\n\r\n for item in items:\r\n kind = item.find('post_type').string\r\n filename = item.find('post_name').string\r\n post_id = item.find('post_id').string\r\n\r\n if kind == 'attachment':\r\n attachments.append((item.find('post_parent').string,\r\n item.find('attachment_url').string))\r\n else:\r\n filename = get_filename(filename, post_id)\r\n names[post_id] = filename\r\n attachedposts = {}\r\n for parent, url in attachments:\r\n try:\r\n parent_name = names[parent]\r\n except KeyError:\r\n #attachment's parent is not a valid post\r\n parent_name = None\r\n\r\n try:\r\n attachedposts[parent_name].append(url)\r\n except KeyError:\r\n attachedposts[parent_name] = []\r\n attachedposts[parent_name].append(url)\r\n return attachedposts",
"def get_tags(element):\n tags = []\n id_num = element.attrib['id']\n for child in element.iter('tag'):\n attr = child.attrib\n\n # check for problematic characters first and skip if matches\n if PROBLEMCHARS.search(attr['k']):\n continue\n\n child_dict = {}\n child_dict['id'] = id_num\n child_dict['value'] = attr['v']\n\n # stackoverflow.com/questions/6903557/splitting-on-first-occurrence\n child_dict['key'] = attr['k'].split(':', 1)[-1]\n\n # Check if the k tag has : in it and treat according to specs\n if LOWER_COLON.search(attr['k']):\n child_dict['type'] = attr['k'].split(':')[0]\n else:\n child_dict['type'] = default_tag_type\n\n # street name check (not all : matches are addr:)\n if child_dict['type'] == 'addr' & child_dict['key'] == 'street':\n child_dict['value'] = update_street_name(child_dict['value'])\n\n tags.append(child_dict)\n\n return tags",
"def getXmlDict(oxml):\n lines = oxml.split(\"\\n\")\n rrd_d = {}\n # <cf> AVERAGE </cf>\n # <pdp_per_row> 288 </pdp_per_row> <!-- 86400 seconds -->\n\n # parse xml file\n key = \"\"\n rows = [] \n for line in lines:\n if (reMatchCF(line)):\n cf = line.split()[1]\n key += cf\n if (reMatchPDP(line)):\n pdp = line.split()[1]\n key += pdp\n if (reMatchRow(line)):\n ele = line.split()\n time = ele[5]\n val = ele[8]\n rows.append([time,val,line])\n # end of rra is reached, store to dict and rest vals\n if (reMatchDBEnd(line) and key and rows):\n rrd_d[key] = rows\n key = \"\"\n rows = []\n return rrd_d",
"def _xml_convert(self, element):\n\n children = list(element)\n\n if len(children) == 0:\n return self._type_convert(element.text)\n else:\n # if the fist child tag is list-item means all children are list-item\n if children[0].tag == \"list-item\":\n data = []\n for child in children:\n data.append(self._xml_convert(child))\n else:\n data = {}\n for child in children:\n data[child.tag] = self._xml_convert(child)\n\n return data",
"def pitems(self):\n return self.palues().root()",
"def pitems(self):\n return self.palues().root()",
"def post_data(driver):\n post_info = {\n \"post_age\" : \"li.posted\", \n \"page_views\" : \"ul.posting-info li.views\"\n }\n for key, selector in post_info.items():\n try:\n text = driver.find_element_by_css_selector(selector).text\n if key == \"post_age\":\n post_info[key] = parse_post_age(text)\n else:\n post_info[key] = ''.join(list(filter(lambda c: c.isdigit(), text)))\n except Exception as e:\n post_info[key] = \"\"\n pass\n return post_info",
"def extract_node(element, node_attr_fields = NODE_FIELDS, problem_chars=PROBLEMCHARS, default_tag_type='regular') :\r\n attribs = {}\r\n tags = []\r\n\r\n \"\"\" Extraction Routine\"\"\"\r\n for key in node_attr_fields:\r\n attribs[key] = element.attrib[key]\r\n for tag in element.iter(\"tag\"):\r\n node_tag = {}\r\n node_tag[\"type\"] = default_tag_type\r\n node_tag[\"id\"] = attribs[\"id\"]\r\n node_tag[\"value\"] = tag.attrib[\"v\"]\r\n\r\n k = tag.attrib[\"k\"]\r\n\r\n if problem_chars.search(k):\r\n continue\r\n elif \":\" in k:\r\n node_tag[\"key\"] = k.split(\":\", 1)[1]\r\n node_tag[\"type\"] = k.split(\":\", 1)[0]\r\n else:\r\n node_tag[\"key\"] = k\r\n\r\n # Update city name , if any, before appending the dictionary in list\r\n\r\n if node_tag[\"key\"] == \"city\":\r\n node_tag[\"value\"] = update_city_name(node_tag[\"value\"])\r\n\r\n # Update street name, if any , as per mapping\r\n\r\n if node_tag[\"key\"] == \"street\" or \"street:name\":\r\n node_tag[\"value\"] = update_street_name(node_tag[\"value\"], mapping)\r\n\r\n # Check if postcode is valid, if invalid prefix the postcode value with 'fixme:'\r\n\r\n if node_tag[\"key\"] == \"postcode\":\r\n invalid, node_tag[\"value\"] = update_postcode(node_tag[\"value\"])\r\n if invalid:\r\n node_tag[\"value\"] = 'fixme:' + node_tag[\"value\"]\r\n\r\n\r\n tags.append(node_tag)\r\n\r\n return {'node': attribs, 'node_tags': tags}",
"def parse_xml1(filename):\r\n tree = ET.parse(filename)\r\n # tree=ElementTree()\r\n # tree.parse(filename)\r\n\r\n baseInfo={}\r\n baseInfo['foder'] = tree.find('foder').text\r\n baseInfo['filename'] = tree.find('filename').text\r\n baseInfo['path'] = tree.find('path').text\r\n baseInfo['source/database'] = tree.find('source/database').text\r\n #tree.find('database')\r\n baseInfo['size/width'] = tree.find('size/width').text\r\n baseInfo['size/height'] = tree.find('size/height').text\r\n baseInfo['size/depth'] = tree.find('size/depth').text\r\n baseInfo['segmented'] = tree.find('segmented').text\r\n objects = []\r\n for obj in tree.findall('object'):\r\n obj_struct = {}\r\n obj_struct['score'] = obj.find('score').text\r\n obj_struct['region'] = obj.find('region').text\r\n obj_struct['imageptr'] = obj.find('imageptr').text\r\n if obj.find('label_des') is None:\r\n obj_struct['label_des']=\"\"\r\n else:\r\n obj_struct['label_des'] = obj.find('label_des').text\r\n obj_struct['name'] = obj.find('name').text\r\n obj_struct['pose'] = obj.find('pose').text\r\n obj_struct['truncated'] = obj.find('truncated').text #remove int()\r\n obj_struct['difficult'] = obj.find('difficult').text #remove int()\r\n bbox = obj.find('bndbox')\r\n obj_struct['bbox'] = [int(bbox.find('xmin').text),\r\n int(bbox.find('ymin').text),\r\n int(bbox.find('xmax').text),\r\n int(bbox.find('ymax').text)]\r\n objects.append(obj_struct)\r\n\r\n return baseInfo,objects",
"def convertXmlToProtein(self, xml):\n\t\t# XML to dictionary\n\t\tproteinObject = Protein()\n\t\t\n\t\tdictionary = xmltodict.parse(xml)\n\t\troot = dictionary[\"uniprot\"]\n\t\tentry = root[\"entry\"]\n\t\t\n\t\tfor element, value in entry.items():\n\t\t\tif element == \"@accession\":\n\t\t\t\tproteinObject.addAttribute(\"id\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"name\":\n\t\t\t\tproteinObject.addAttribute(\"proteinShortName\", \"uniprot\", value)\n\t\t\t\t\n\t\t\tif element == \"protein\":\n\t\t\t\tfullname = value[\"recommendedName\"][\"fullName\"]\n\t\t\t\tproteinObject.addAttribute(\"proteinFullName\", \"uniprot\", fullname)\n\t\t\t\t\n\t\t\tif element == \"@created\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"creationDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\t\n\t\t\tif element == \"@modified\":\n\t\t\t\tyear,month,day = value.split(\"-\")\n\t\t\t\tproteinObject.addAttribute(\"modifiedDate\", \"uniprot\", self.convertDateToNative(day,month,year) )\n\t\t\t\n\t\t\tif element == \"comment\":\n\t\t\t\tfor comment in entry[\"comment\"]:\n\t\t\t\t\tif \"text\" in comment:\n\t\t\t\t\t\ttext = comment[\"text\"][\"#text\"] if isinstance(comment[\"text\"], OrderedDict) else comment[\"text\"]\n\t\t\t\t\t\tproteinObject.addAttribute(comment[\"@type\"], \"uniprot\",text)\n\t\t\t\t\t\n\t\t\tif element == \"gene\":\n\t\t\t\tgenes = []\n\t\t\t\tfor gene in value[\"name\"]:\n\t\t\t\t\tif \"#text\" in gene and isinstance(gene, OrderedDict):\n\t\t\t\t\t\tgenes.append(gene[\"#text\"])\n\t\t\t\t\t\n\t\t\t\tproteinObject.addAttribute(\"geneName\", \"uniprot\", genes)\n\t\t\t\t\t\n\t\t\tif element == \"organism\":\n\t\t\t\tif isinstance(value[\"name\"], list):\n\t\t\t\t\torganisms = []\n\t\t\t\t\tfor organism in value[\"name\"]:\n\t\t\t\t\t\torganisms.append(organism[\"#text\"])\n\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tproteinObject.addAttribute(\"organism\", \"uniprot\", value[\"name\"][\"#text\"])\n\t\t\t\t\n\t\t\t\n\t\t\tif element == \"sequence\":\n\t\t\t\tproteinObject.addAttribute(\"sequence\", \"uniprot\",value[\"#text\"].replace(\"\\n\",\"\"))\n\t\t\t\tproteinObject.addAttribute(\"sequencelength\", \"uniprot\",value[\"@length\"].replace(\"\\n\",\"\"))\n\n\n\t\treturn proteinObject",
"def get_data(tree_elem):\n fly_lst = []\n for element in tree_elem:\n for elem in element.xpath('td/label/div[1]/span'):\n fly_dict = dict()\n fly_info_lst = [item.strip() for item in elem.xpath('@title')[0].split(',')]\n class_cost_lst = fly_info_lst[3].split(':')\n fly_dict['dep/arv'] = fly_info_lst[1]\n fly_dict['dur'] = fly_info_lst[2]\n fly_dict['class'] = class_cost_lst[0]\n fly_dict['cost'] = get_price(class_cost_lst[1])\n fly_lst.append(fly_dict)\n return fly_lst",
"def get_items_from_element(element):\n data = {'element': element,\n 'items': []}\n for item in element[len(element)-1]:\n item_info = {'data': item.items(),\n 'tag': item.tag,\n 'keys': item.keys()}\n data['items'].append(item_info)\n return data",
"def retrieve_ext_list(self, puid_list):\n xml_iter = self._parse_xml()\n puiddict = {}\n for topelements in xml_iter:\n if (\n topelements.tag\n == \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}FileFormatCollection\"\n ):\n for fileformats in topelements:\n puid = fileformats.get(\"PUID\")\n for puids in puid_list:\n if puids != puid:\n continue\n ext = fileformats.find(\n \"{http://www.nationalarchives.gov.uk/pronom/SignatureFile}Extension\"\n )\n if ext is not None:\n # Return the first file format extension.\n puiddict[puids] = ext.text\n break\n puiddict[puids] = None\n break\n notfound = []\n for puid in puid_list:\n if puid not in puiddict:\n if puid not in notfound:\n notfound.append(puid)\n if len(notfound) > 0:\n for puid in notfound:\n puiddict[puid] = \"notfound\"\n return puiddict",
"def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict",
"def kgml_parser(self, kegg_cpd_id_list):\n result_dic = dict()\n # try:\n kg_tree = et.fromstring(self.kgml)\n for cpd in kegg_cpd_id_list:\n for el in kg_tree.iterfind('entry/graphics[@name=\"%s\"]' % cpd):\n if cpd not in result_dic.keys():\n result_dic[cpd] = [(el.get('x'), el.get('y'))]\n else:\n result_dic[cpd].append((el.get('x'), el.get('y')))\n # except:\n # # todo error exception\n # print 'error while parsing kgml of %s' % self.kegg_id\n return result_dic",
"def get_pcr_sequences(self):\n d = {}\n for analyser in self.xml_tree.getroot():\n for child in analyser:\n if child.tag == 'all-assays':\n for assay in child:\n attributes = assay.attrib\n assay_id = attributes['id']\n if re.match(r'rs\\d+', assay_id):\n d[assay_id] = [attributes['pcr1'], attributes['pcr2']]\n return d",
"def get_proplist(self, naam):\r\n h = self._root.find(naam)\r\n if h is None:\r\n h = []\r\n else:\r\n hh = h.findall(\"regel\")\r\n h = []\r\n for x in hh:\r\n if x.text is None:\r\n h.append(\"\")\r\n else:\r\n h.append(x.text.rstrip())\r\n return h",
"def parse_book_record(root) -> dict:\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//BookTitle/text()\")))\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Book/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Book/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Book/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n return doc",
"def get_elements():\n elements = { 'Shapes':[], 'Strokes':[] }\n curves_knob = rp_node['curves']\n root_layer = curves_knob.rootLayer\n elements = parse_layer(root_layer, elements, [root_layer])\n print elements",
"def xml_para_dicionario(self, string, multi=False):\r\n try:\r\n\t root = xml.fromstring(string)\r\n itens = []\r\n for cclass in root:\r\n mykeys = []\r\n myvalues = []\r\n for item in cclass:\r\n children = item.getchildren() # Em caso de tags encadeadas\r\n if children:\r\n for child in children:\r\n mykeys.append(child.tag.lower())\r\n myvalues.append(child.text )\r\n\t else:\r\n mykeys.append(item.tag.lower())\r\n myvalues.append(item.text )\r\n it = dict(zip(mykeys, myvalues))\r\n itens.append(it)\r\n\r\n if multi: # Retorna uma lista de dicionarios \r\n return itens\r\n return itens[0] # Retorna apenas um dicionario\r\n\r\n\texcept Exception, e:\r\n return None",
"def recursive_parse_xml_to_dict(xml):\n if not xml:\n return {xml.tag: xml.text}\n result = {}\n for child in xml:\n child_result = recursive_parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}",
"def elem2dict(node):\n result = {}\n\n for element in node.iterchildren():\n # Remove namespace prefix\n key = element.tag.split('}')[1] if '}' in element.tag else element.tag\n key = key[:1].lower() + key[1:]\n\n # Process element as tree element if the inner XML contains non-whitespace content\n if element.text and element.text.strip():\n value = element.text\n else:\n value = elem2dict(element)\n if key in result:\n if type(result[key]) is list:\n result[key].append(value)\n else:\n tempvalue = result[key].copy()\n result[key] = [tempvalue, value]\n else:\n result[key] = value\n return result",
"def _parse_political_posts(self):\n functions = [\n h\n for h in self.right_column.find_all(\"h4\")\n if h.text == \"Politische Funktionen\"\n ]\n if functions:\n functions = functions[0].nextSibling.nextSibling.find_all(\"li\")\n # TODO: Can we do better than just taking the whole string?\n return {\"posts\": self._get_current_and_former(functions)}\n return {}",
"def get_data_from_bs(bs_data):\n all_raw_data = bs_data.find_all(\"properties\")\n return (to_dict(tag for tag in raw if type(tag) is Tag) for raw in all_raw_data)",
"def parseX(self):\n\t\treturn self._dictOut.keys()",
"def recursive_parse_xml_to_dict(xml):\n if not xml:\n return {xml.tag: xml.text}\n result = {}\n for child in xml:\n child_result = recursive_parse_xml_to_dict(child)\n if child.tag != 'object':\n result[child.tag] = child_result[child.tag]\n else:\n if child.tag not in result:\n result[child.tag] = []\n result[child.tag].append(child_result[child.tag])\n return {xml.tag: result}",
"def parseX(self):\n return self.dictOut.keys()",
"def tag_dict(self):\n tag_dict = dict()\n for document in self.documents:\n for tag in document.tags:\n tag_type = tag['tag']\n tag_dict[tag_type] = tag_dict.get(tag_type, []) + [tag]\n return tag_dict",
"def get_devices_information():\n global nipper_xml\n devices = {}\n\n for device in nipper_xml.findall('./information/devices/device'):\n if DEBUG:\n print \"\\t\" + note + \"Name: %s\" % device.get('name')\n print \"\\t\" + note + \"Type: %s\" % device.get('type')\n print \"\\t\" + note + \"OS: %s\" % device.get('os')\n print \"\\t\" + note + \"OS Version: %s\" % device.get('osversion')\n devices[device.attrib.get('name')] = {'name': device.get('name'),\n 'type': device.get('type'),\n 'os': device.get('os'),\n 'osversion': device.get('osversion')}\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices",
"def load_infos(self):\n xml = self.api.photos_getInfo(photo_id=self.id)\n xml = xml.find(\"photo\")\n out = xml.attrib\n out[\"title\"] = xml.find(\"title\").text\n out[\"description\"] = xml.find(\"description\").text\n out[\"dates\"] = xml.find(\"dates\").attrib\n\n # Load urls\n out[\"urls\"] = {}\n for url_xml in xml.find(\"urls\").findall(\"url\"):\n out[\"urls\"][url_xml.attrib[\"type\"]] = url_xml.text\n\n # Load tags\n out[\"tags\"] = []\n for tag_xml in xml.find(\"tags\").findall(\"tag\"):\n tag = tag_xml.attrib\n tag[\"tag\"] = tag_xml.text\n out[\"tags\"].append(tag)\n\n return out",
"def get_agencies():\n\n xml_query_string = 'http://webservices.nextbus.com/service/publicXMLFeed?command=agencyList'\n xml_request = requests.get(xml_query_string)\n agencies = {}\n root = ET.fromstring(xml_request.text)\n\n for child in root:\n agencies[child.attrib['tag']] = child.attrib['title']\n return agencies",
"def info(self) -> dict:\n xml_path = self.xml_path.format(id=self.id)\n p_id = int(ET.parse(xml_path).find('pattern_id').text)\n defect_flag = bool(int(ET.parse(xml_path).find('defective').text))\n info = {'pattern_id': p_id, 'id': self.id, 'defective': defect_flag}\n return info",
"def wp2fields(xml, wp_custpost=False):\r\n\r\n items = get_items(xml)\r\n for item in items:\r\n\r\n if item.find('status').string == \"publish\":\r\n\r\n try:\r\n # Use HTMLParser due to issues with BeautifulSoup 3\r\n title = HTMLParser().unescape(item.title.contents[0])\r\n except IndexError:\r\n title = 'No title [%s]' % item.find('post_name').string\r\n logger.warning('Post \"%s\" is lacking a proper title' % title)\r\n\r\n filename = item.find('post_name').string\r\n post_id = item.find('post_id').string\r\n filename = get_filename(filename, post_id)\r\n\r\n content = item.find('encoded').string\r\n raw_date = item.find('post_date').string\r\n date_object = time.strptime(raw_date, \"%Y-%m-%d %H:%M:%S\")\r\n date = time.strftime(\"%Y-%m-%d %H:%M\", date_object)\r\n author = item.find('creator').string\r\n\r\n categories = [cat.string for cat in item.findAll('category', {'domain' : 'category'})]\r\n # caturl = [cat['nicename'] for cat in item.find(domain='category')]\r\n\r\n tags = [tag.string for tag in item.findAll('category', {'domain' : 'post_tag'})]\r\n\r\n kind = 'article'\r\n post_type = item.find('post_type').string\r\n if post_type == 'page':\r\n kind = 'page'\r\n elif wp_custpost:\r\n if post_type == 'post':\r\n pass\r\n # Old behaviour was to name everything not a page as an article.\r\n # Theoretically all attachments have status == inherit so\r\n # no attachments should be here. But this statement is to\r\n # maintain existing behaviour in case that doesn't hold true.\r\n elif post_type == 'attachment':\r\n pass\r\n else:\r\n kind = post_type\r\n yield (title, content, filename, date, author, categories, tags,\r\n kind, \"wp-html\")",
"def readWarp(self):\n warpDict = {}\n for warpAxisElement in self.root.findall(\".warp/axis\"):\n axisName = warpAxisElement.attrib.get(\"name\")\n warpDict[axisName] = []\n for warpPoint in warpAxisElement.findall(\".map\"):\n inputValue = float(warpPoint.attrib.get(\"input\"))\n outputValue = float(warpPoint.attrib.get(\"output\"))\n warpDict[axisName].append((inputValue, outputValue))\n self.warpDict = warpDict",
"def _item_to_elements_parser(self, item):\n elements = {}\n\n ####### Sad solution - look for better one. #######\n items = [\"data\", \"img\", \"title\", \"link\", \"price\"]\n values = (\"item.p.string.strip()\", 'item.img[\"src\"]', 'item.img[\"alt\"]',\n '''item.find(\"a\", {\"class\":\"detailsLink\"})['href']''',\n '''item.find('strong').string.strip()''')\n for key, value in zip(items, values):\n\n # CONVERT TIME\n # if key == \"data\":\n # try:\n # print (time.strptime(eval(value), \"%d %b\"))\n # except Exception as error:\n # print (error) # time data '5 paz' does not match format '%d %b'\n\n try:\n elements.update({key:eval(value)})\n except (TypeError, AttributeError):\n elements.update({key:None})\n\n\n # print()\n # for key, val in elements.items():\n # print (key, val)\n # print()\n ###################################################\n return elements",
"def tags_dict(self):\n return ({'name': 'tag', 'attrs': {'k': k, 'v': v}} for k, v in self.tags.items())",
"def parseX(self):\n return self._dictOut.keys()",
"def rss_attributes(self):\n return {u\"version\": self._version,\n u\"xmlns:media\": u\"http://search.yahoo.com/mrss/\",\n \"xmlns:atom\": u\"http://www.w3.org/2005/Atom\"\n }",
"def _get_sideinfo(self, content_tag):\n\n # dictionary that used to store all the relevant information\n # regarding an apartment\n sideinfo = {} \n try:\n # main content of all the relavent features \n apt_info_tags = content_tag.find_all('div', class_='flex flex-col pr-8')\n \n for apt_tag in apt_info_tags:\n # construct (key, value) pair for the dictionary \n key = apt_tag.find('div', class_='data-name') \\\n .get_text() \\\n .strip()\n\n value = apt_tag.find('div', class_='data-value') \\\n .get_text() \\\n .strip()\n try:\n value = self._extract_num(value)\n except:\n pass\n\n # fill in the dictionary\n sideinfo[key] = value\n\n return sideinfo\n except:\n return sideinfo",
"def etree2dict(element):\n i = dict(element.items())\n i.update(_make_content(i, element.text, strip=True))\n\n for child in element:\n tag = child.tag\n value = etree2dict(child)\n i.update(_make_content(i, value, tag))\n\n if element.text and not set(i).difference([\"content\"]):\n # element is leaf node and doesn't have attributes\n i = i.get(\"content\")\n\n return i",
"def make_books_dicts(xml, book_list):\n\n books_response = xml.GoodreadsResponse.reviews.review\n for book in books_response:\n a_book = {}\n a_book['title'] = book.book.title.cdata.encode('utf8')\n a_book['author_name'] = book.book.authors.author.name.cdata.encode('utf8')\n a_book['author_gr_id'] = int(book.book.authors.author.id.cdata.encode('utf8'))\n a_book['gr_work_id'] = int(book.book.work.id.cdata.encode('utf8'))\n a_book['description'] = book.book.description.cdata\n\n a_book['edition'] = {}\n a_book['edition']['isbn'] = valid_isbn(book.book.isbn.cdata.encode('utf8'))\n a_book['edition']['format_id'] = get_format_id(book.book.format.cdata.encode('utf8'))\n a_book['edition']['pic_url'] = book.book.image_url.cdata.encode('utf8')\n a_book['edition']['publisher'] = book.book.publisher.cdata.encode('utf8')\n a_book['edition']['gr_url'] = book.book.link.cdata.encode('utf8')\n a_book['edition']['gr_id'] = int(book.book.id.cdata.encode('utf8'))\n year = date_is_valid(book.book.publication_year.cdata.encode(\"utf8\"))\n month = date_is_valid(book.book.publication_month.cdata.encode(\"utf8\"))\n day = date_is_valid(book.book.publication_day.cdata.encode(\"utf8\"))\n a_book['edition']['date'] = datetime.date(year, month, day)\n a_book['edition']['num_pages'] = valid_page_count(book.book.num_pages.cdata.encode('utf8'))\n book_list.append(a_book)\n\n print \"*******THERE ARE \" + str(len(book_list)) + \" ON THIS SHELF*******\"\n\n return book_list",
"def parse(self):\n p = feedparser.parse(self.xml)\n self.p = p\n return p",
"def as_dict(self):\n return {'name': self.name, 'attrs': self.attrs_dict, 'children': self.tags_dict}",
"def parse_xml(file_name):\n events = (\"start\", \"end\")\n has_start = False\n json_dict = dict()\n # Traverse the XML\n for event, element in ET.iterparse(file_name, events=events, encoding=\"utf-8\", load_dtd=True, recover=True):\n print(event, element.tag, element.text)\n # Article node: initialize variables\n if event == 'start' and element.tag in INCLUDE_ARTICLES:\n has_start = True\n # Each article node has an unique attribute key\n publication_key = element.attrib['key']\n authors = list()\n publication_year = ''\n publication_type = str(element.tag)\n publication_title = ''\n # Author node\n elif event == 'start' and element.tag == 'author' and has_start:\n no_accent = lambda x: unidecode.unidecode(x) if x is not None else x\n authors.append(no_accent(element.text))\n # Title node\n elif event == 'start' and element.tag == 'title' and has_start:\n publication_title = element.text\n # Year node\n elif event == 'start' and element.tag == 'year' and has_start:\n publication_year = element.text\n # End article node: save information. This will never execute before initializing all of the variables\n elif has_start and event == 'end' and element.tag in INCLUDE_ARTICLES:\n json_dict[publication_key] = {\n '_id': publication_key,\n 'authors': authors,\n 'title': publication_title,\n 'year': publication_year,\n 'type': publication_type}\n has_start = False\n element.clear()\n else:\n # Remove element (otherwise there will be memory issues due to file size)\n element.clear()\n continue\n\n return json_dict",
"def _xmlTreeToDict(cls, node):\n if not isinstance(node, ElementTree.Element):\n raise ATException('_xmlTreeToDict(), param: [node] expected a xml.etree.ElementTree.Element')\n\n nodeDict = {}\n\n if len(node.items()) > 0:\n nodeDict.update(dict(node.items()))\n\n for child in node:\n childItemDict = cls._xmlTreeToDict(child)\n if child.tag in nodeDict:\n if isinstance(nodeDict[child.tag], list):\n nodeDict[child.tag].append(childItemDict)\n else:\n nodeDict[child.tag] = [nodeDict[child.tag], childItemDict]\n else:\n nodeDict[child.tag] = childItemDict\n\n text = ''\n if node.text is not None:\n text = node.text.strip()\n\n if len(nodeDict) > 0:\n if len(text) > 0:\n nodeDict[node.tag + '_text'] = text\n else:\n nodeDict = text\n\n return nodeDict",
"def parse(k):\n return stringify_children(xml_object.xpath(k)[0])",
"def get_tags():\n xml_tree = ET.parse(RSS_FEED)\n root = xml_tree.getroot()\n items = root.getchildren()[0].getchildren()\n\n tags = {}\n for item in items:\n children = item.getchildren()\n for child in children:\n if child.tag == \"category\":\n tag = child.text.replace(\"-\", \" \").lower()\n tags[tag] = tags.get(tag, 0) + 1\n\n return tags",
"def infos_serie(self):\n if self._root is None:\n return\n\n infos = {}\n serie = self._root.find('Series')\n infos['firstAired'] = serie.find('FirstAired').text\n infos['description'] = unicode(serie.find('Overview').text)\n infos['lastUpdated'] = int(serie.find('lastupdated').text)\n return infos",
"def extract_posts(posts_file, output_filename=direc+\"/posts.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting posts from \" + posts_file + \"...\")\r\n posts_dict = {}\r\n with open(output_filename, 'w', encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(posts_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n line = \"\"\r\n if child.attrib['PostTypeId'] == '1' and 'AcceptedAnswerId' in child.attrib:\r\n posts_dict[child.attrib['Id']] = {'accepted': child.attrib['AcceptedAnswerId'], 'other': []}\r\n clean_title = clean_markdown(child.attrib['Title'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + clean_title + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n elif child.attrib['PostTypeId'] == '2':\r\n if child.attrib['ParentId'] in posts_dict and not child.attrib['Id'] == posts_dict[child.attrib['ParentId']]['accepted']:\r\n posts_dict[child.attrib['ParentId']]['other'].append(child.attrib['Id'])\r\n clean_body = clean_markdown(child.attrib['Body'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['ParentId'] + \"\\t\" + clean_body + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n current += 1\r\n f.write(line)\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting posts from \" + output_filename + \".\\n\")\r\n return posts_dict",
"def get_tags(self,element):\n if element in self.element2tags.keys():\n return self.element2tags[element]\n return []",
"def xml_to_dict(args):\n rdict = dict()\n args = re.sub(r'xmlns=\\\".+?\\\"', '', args)\n root = ET.fromstring(args)\n ifmtrunk = root.find('.//ifmtrunk')\n if ifmtrunk is not None:\n try:\n ifmtrunk_iter = ET.Element.iter(ifmtrunk)\n except AttributeError:\n ifmtrunk_iter = ifmtrunk.getiterator()\n\n for ele in ifmtrunk_iter:\n if ele.text is not None and len(ele.text.strip()) > 0:\n rdict[ele.tag] = ele.text\n return rdict",
"def parse(k):\r\n return stringify_children(xml_object.xpath(k)[0])",
"def dom2dict(element):\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return dict(list(zip(keys, values)))",
"def get_attrs_dict(self, root_element):\n attr_elements = root_element.findall(\"attribute\")\n attrs_dict = {}\n for el in attr_elements:\n attrs_dict[el.attrib[\"name\"]] = {\n \"value\": el.attrib[\"value\"],\n \"type\": el.attrib.get(\"type\", None)\n }\n return attrs_dict",
"def children(self):\n try:\n attr_name = 'Parrot_%s_attributes' % self.pmc_name\n attr_type = gdb.lookup_type(attr_name).pointer()\n\n attrs = self.val['data'].cast(attr_type).dereference()\n\n '''\n Something ridiculous happens here. I take a list of tuples:\n [ (\"key1\", \"val1\"), (\"key2\", \"val2\") ]\n\n and turn it, in one iteration, into:\n [\n [(\"name\", \"key1\"), (\"value\", \"val1\")],\n [(\"name\", \"key2\"), (\"value\", \"val2\")]\n ]\n\n That, in turn, is mutated into one list.\n [\n (\"name\", \"key1\"), (\"value\", \"val1\"),\n (\"name\", \"key2\"), (\"value\", \"val2\")\n ]\n\n What we go through for 100% lazy iteration.\n '''\n name_value_tuples = PMCIterator(attrs)\n nv_iter = itertools.imap(lambda val: [ (\"name\", val[0]), (\"value\", val[1]) ],\n name_value_tuples)\n nv_chain = itertools.chain.from_iterable(nv_iter)\n\n return nv_chain\n except RuntimeError as e:\n return [ ( \"__ERROR__\", \"\" ) ].__iter__()",
"def xml2dict( xml, sanitize=True, prefix=None):\n \n \n #Decode to avert parsing errors as some software dump large text\n #fields into the file that occasionally contain erronious chars\n xml=xml.decode('utf-8', errors='ignore')\n\n \n return etree2dict(etree.fromstring(xml), sanitize, prefix)",
"def parseX(self):\n\t\treturn self._dict.keys()",
"def serialize(self):\n child_dict = OrderedDict()\n for attr, item in iteritems(self._contents):\n child_dict[attr] = item.serialize()\n return child_dict",
"def parse(tree, callback):\n doc_attrs = tree.attrib\n bibliographic = tree.find(\n '{http://www.epo.org/exchange}bibliographic-data')\n family = tree.find('{http://www.epo.org/exchange}patent-family')\n title = bibliographic.findall(\n '{http://www.epo.org/exchange}invention-title')\n callback({\n 'title': fmap(lambda x: {'title': x.text, 'lang': x.attrib.get('lang', ''), 'data-format': x.attrib.get('data-format', '')}, title),\n 'country': doc_attrs.get('country', ''),\n 'status': doc_attrs.get('status', ''),\n # identical to <doc-number> in <publication-reference> (http://documents.epo.org/projects/babylon/eponet.nsf/0/6266D96FAA2D3E6BC1257F1B00398241/$File/T09.01_ST36_User_Documentation_vs_2.5.7_en.pdf)\n 'doc-number': doc_attrs.get('doc-number', ''),\n # identical to <kind> in <publication-reference> (http://documents.epo.org/projects/babylon/eponet.nsf/0/6266D96FAA2D3E6BC1257F1B00398241/$File/T09.01_ST36_User_Documentation_vs_2.5.7_en.pdf)\n 'kind': doc_attrs.get('kind', ''),\n # 5.3.1. Attribute \"doc-id\" (http://documents.epo.org/projects/babylon/eponet.nsf/0/6266D96FAA2D3E6BC1257F1B00398241/$File/T09.01_ST36_User_Documentation_vs_2.5.7_en.pdf)\n 'doc-id': doc_attrs.get('doc-id', ''),\n 'date-publ': doc_attrs.get('date-publ', ''),\n 'family-id': doc_attrs.get('family-id', ''),\n 'family-members': familymembers(family),\n 'parties': parties(bibliographic),\n 'citations': citations(bibliographic),\n 'classifications': classifications(bibliographic)\n })",
"def extract_staxml_info(staxml):\n instruments = defaultdict(dict)\n\n if isinstance(staxml, Inventory):\n inv = staxml\n else:\n if os.path.isfile(staxml):\n inv = safe_load_staxml(staxml)\n else:\n raise ValueError(\"Input staxml is neither obspy.Inventory or \"\n \"staxml file\")\n for nw in inv:\n nw_code = nw.code\n for sta in nw:\n sta_code = sta.code\n for chan in sta:\n chan_code = chan.code\n loc_code = chan.location_code\n key = \"%s.%s.%s.%s\" % (nw_code, sta_code, loc_code, chan_code)\n instruments[key][\"latitude\"] = chan.latitude\n instruments[key][\"longitude\"] = chan.longitude\n instruments[key][\"elevation\"] = chan.elevation\n instruments[key][\"depth\"] = chan.depth\n if chan.sensor.description is not None:\n sensor_type = chan.sensor.description\n elif chan.sensor.type is not None:\n sensor_type = chan.sensor.type\n else:\n sensor_type = \"None\"\n instruments[key][\"sensor\"] = sensor_type\n\n return instruments",
"def ParseXML(self, rawXML):\n if \"Search error: API limited due to abuse\" in str(rawXML.items()):\n raise Rule34_Error('Rule34 rejected your request due to \"API abuse\"')\n\n d = {rawXML.tag: {} if rawXML.attrib else None}\n children = list(rawXML)\n if children:\n dd = defaultdict(list)\n for dc in map(self.ParseXML, children):\n for k, v in dc.items():\n dd[k].append(v)\n d = {rawXML.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}\n if rawXML.attrib:\n d[rawXML.tag].update(('@' + k, v) for k, v in rawXML.attrib.items())\n if rawXML.text:\n text = rawXML.text.strip()\n if children or rawXML.attrib:\n if text:\n d[rawXML.tag]['#text'] = text\n else:\n d[rawXML.tag] = text\n return d",
"def parse(filename):\n\n tree = etree.parse(filename)\n root = tree.getroot()\n # according to the structure of the xml article meta nested under \n # front then article-meta\n articleMeta = root[0][1]\n # pubmed central article id\n pmcId = ''\n # the author list, the list of names excluding corresponding\n # athor\n otherAuthors = []\n # the name and email of the corresponding authors\n cAuthors = []\n # container for all the author groups\n authorGroups = []\n \n for child in articleMeta:\n # find the pmc id\n if ((child.tag == 'article-id') and not(isEmpty(child.attrib))):\n if (child.attrib['pub-id-type'] == 'pmc'):\n pmcId = child.text\n # find the author group\n elif (child.tag == 'contrib-group'):\n authorGroups.append(child)\n # this child may contain important corresponding information\n elif (child.tag == 'author-notes'):\n authorNotes = child\n # find the publication date\n elif (child.tag == 'history'):\n for theDate in child:\n if ('date-type' in theDate.attrib and theDate.attrib['date-type'] == 'accepted'):\n #publiction date YEAR MONTH DAY\n if (theDate.find('year') != None):\n theYear = theDate.find('year').text\n else:\n theYear = 0\t\n if (theDate.find('month') != None):\n theMonth = theDate.find('month').text\n else:\n theMonth = 6\n if (theDate.find('day') != None):\n theDay = theDate.find('day').text\n else:\n theDay = 1\n\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n elif (child.tag == 'pub-date'): \n if ('pub-type' in child.attrib and (child.attrib['pub-type'] == 'ppub' or child.attrib['pub-type'] == 'epub')):\n #for grandchild in child: print(grandchild.tag)\n \n if (child.find('year') != None):\n theYear = child.find('year').text\n else:\n theYear = 0\n \n if (child.find('month') != None):\n theMonth = child.find('month').text\n else:\n theMonth = 6\n \n if (child.find('day') != None):\n theDay = child.find('day').text\n else:\n theDay = 1\t\t\t\t\t\n publicationDate = (theYear, theMonth, theDay)\n try:\n dateCheck = date(int(theYear), int(theMonth), int(theDay))\n except:\n return((-1,))\n case1 = False # will be used for post-processing, corr author identified but no email\n for authorGroup in authorGroups:\n # parse author group information\n for child in authorGroup:\n if (child.tag == 'contrib' and child.attrib['contrib-type'] == 'author'):\n # the first child is the name tag\n try:\n name = child[0].find('given-names').text + ' ' + child[0].find('surname').text\n except:\n return((-1,))\n if ('corresp' in child.attrib): # and child.attrib['corresp'] == 'yes'):\n # if it a corresponding author\n # check to see if there is email field\n if (len(child) > 2 and child[1].find('email') != None):\n data = (name, child[1].find('email').text)\n cAuthors.append(data)\n #else post-process this case: case(1)\n else:\n data = (name, 'null')\n cAuthors.append(data)\n case1 = True\n else: \n # handle EMBO style xml \n xrefList = findInSubtree(child, 'xref')\n if (len(xrefList) > 0):\n for xref in xrefList:\n if ('ref-type' in xref.attrib and xref.attrib['ref-type'] == 'corresp'):\n # this is an corresponding author\n data = (name, '')\n cAuthors.append(data)\n case1 = True\n if (case1 == False):\n otherAuthors.append(name) \n else:\n # if not a corresponding author\n otherAuthors.append(name)\n\n # not done yet, some corresponding author information are embedded in author-notes\n if (case1 and 'authorNotes' in locals()):\n i = 0\n # corresponding author identified but no 
email found\n for child in authorNotes:\n if (child.tag == 'corresp'):\n for grandchild in child:\n if (grandchild.tag == 'email'):\n if (i == len(cAuthors)): break\t\n cAuthors[i] = (cAuthors[i][0], grandchild.text)\n i = i + 1\n elif ('authorNotes' in locals()):\n # the linking information is embedded entirely in the text\n text = etree.tostring(authorNotes).strip().decode('utf-8')\n emailElements = findInSubtree(authorNotes, 'email')\n for name in otherAuthors:\n j = 0\n if (text.find(name) != -1 and j < len(emailElements)):\n data = (name, emailElements[j].text)\n cAuthors.append(data)\n otherAuthors.remove(name)\n j = j + 1\n\n # sanity check here, reject anything that may corrupt the database\n if ('pmcId' in locals() and 'publicationDate' in locals()):\n try:\n print(pmcId, otherAuthors, cAuthors, publicationDate)\n except:\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n return(pmcId, otherAuthors, cAuthors, publicationDate)\n else:\n return((-1,))",
"def getElementProperties():",
"def read_xml(self):\n connection = urlopen(self.url)\n in_xml = connection.read()\n state = ElementTree.fromstring(in_xml)\n records = []\n record = []\n\n # Specific to CHP\n # TODO(David) Nested for loops are bad. Change this to be more\n # efficient, possibly use generators.\n for center in state:\n rec_center = center.attrib['ID']\n\n for dispatch in center:\n rec_dispatch = dispatch.attrib['ID']\n\n for log in dispatch:\n record = [rec_center, rec_dispatch]\n\n record.append(log.attrib['ID'])\n\n log_time = log.find('LogTime').text.strip('\"')\n log_type = log.find('LogType').text.strip('\"')\n location = log.find('Location').text.strip('\"')\n loc_desc = log.find('LocationDesc').text.strip('\"')\n area = log.find('Area').text.strip('\"')\n\n record.append(log_time)\n record.append(log_type)\n record.append(location)\n record.append(loc_desc)\n record.append(area)\n\n latlon = log.find('LATLON').text.strip('\"')\n\n (lat, lon) = latlon.split(':')\n lat = str(lat[:2]) + '.' + str(lat[2:])\n lon = '-' + str(lon[:3]) + '.' + str(lon[3:])\n\n record.append(lat)\n record.append(lon)\n\n records.append(record)\n\n self.records = records",
"def parsexml(self):\n raise NotImplementedError",
"def parse_xmls_article_ids(self):\n ids_list_d = []\n for x in self.xmls:\n ids_list_d.append(self.parse_one_xml_article_id(x))\n self.list_of_ids_dict = ids_list_d",
"def parse(self):\n result = {}\n if self.detail_statu:\n sel = Selector(text=self.driver.page_source)\n\n fact_table = sel.xpath(\n '//div[@class=\"facts-table\"]//text()').extract()\n result['facts'] = [list(i)\n for i in zip(fact_table[:: 2],\n fact_table[1:: 2])]\n\n tax_table = sel.xpath(\n '//div[@class=\"tax-values\"]//text()').extract()\n result['taxs'] = [list(i)\n for i in zip(tax_table[:: 2],\n tax_table[1:: 2])]\n\n listing_detail = sel.xpath(\n '//div[@class=\"amenities-container\"]//text()').extract()\n result['detail'] = listing_detail\n result['page_source'] = self.driver.page_source\n self.detail_statu = False\n else:\n self.log.warning(\n '---- Detail page url out of reach, use .search() first to get the detail page')\n return result",
"def _from_origin_to_dict(self):\n try:\n for elem in self._xml_tree.getchildren():\n if elem.tag == \"info\":\n for subelem in elem.xpath(\"//Metadata/General/Metas/Titulo\"):\n self._translated_dict[\"root\"][\"title\"] = subelem.text\n return self\n\n except Exception as e:\n logger.error(\"XML Parse Error. %s\" % repr(e))",
"def get_info_from_collections(self, element_soups):\n collections = []\n # Loop through each soup, make CollectionElement, store in collections\n for element_soup in element_soups:\n collections.append(CollectionElement(element_soup))\n # Return list of CollectionElements\n return collections",
"def extract_summary(self):\n metadata = {}\n\n ## document Id\n documentId = self.tree.find(\"./id\")\n documentId = documentId.attrib['root'] if documentId is not None and \"root\" in documentId.attrib else \"\"\n metadata[\"documentId\"] = documentId\n\n ## setId\n setid = self.tree.find(\"./setId\")\n setid = setid.attrib['root'] if setid is not None and \"root\" in setid.attrib else \"\"\n metadata[\"setId\"] = setid\n\n ## version number\n splversion = self.tree.find(\"./versionNumber\")\n versionNumber = \"\"\n if splversion is not None:\n if \"value\" in splversion.attrib:\n versionNumber = splversion.attrib[\"value\"]\n metadata[\"versionNumber\"] = versionNumber\n\n ## product type \n code = self.tree.find(\"./code\")\n check_if_attrib_exists = lambda x, key: x[key] if key in x else ''\n product_type = check_if_attrib_exists(code.attrib, \"displayName\")\n metadata[\"productType\"] = product_type\n\n ## title\n title_text = self.tree_et.xpath(\"./title//text()\")\n title = (\" \".join([self.strip_newline_tab(t) for t in title_text]) if len(title_text) > 0 else \"\")\n metadata[\"title\"] = title\n\n ## manufacturer\n manufacturer = self.tree.find(\"./author//representedOrganization/name\")\n if manufacturer != None and manufacturer.text != None:\n manufacturer = self.strip_newline_tab(manufacturer.text)\n else:\n manufacturer = \"\"\n metadata[\"manufacturer\"] = manufacturer\n\n ## effectivetime\n effectiveTime = self.tree_et.xpath(\"./effectiveTime/@value\")\n effectiveTime = self.__normalize_date(effectiveTime)\n\n metadata[\"effectiveTime\"] = effectiveTime\n metadata[\"publishedDate\"] = effectiveTime\n\n ## From manufacturedProduct section\n brand_name = self.tree_et.xpath(\".//manufacturedProduct//name\")\n brand_name = self.strip_newline_tab(brand_name[0].text) if len(brand_name) > 0 else \"\"\n metadata[\"drugName\"] = brand_name\n\n route = self.tree_et.xpath(\".//manufacturedProduct//formCode/@code\")\n route = self.strip_newline_tab(route[0]) if len(route) > 0 else \"\"\n metadata[\"routeOfAdministration\"] = route\n\n product_ndc = self.tree_et.xpath(\".//manufacturedProduct//code/@code\")\n product_ndc = self.strip_newline_tab(product_ndc[0]) if len(product_ndc) > 0 else \"\"\n metadata[\"ndcCode\"] = product_ndc\n\n generic_name = self.tree_et.xpath(\".//manufacturedProduct//asEntityWithGeneric//genericMedicine/name\")\n generic_name = self.strip_newline_tab(generic_name[0].text) if len(generic_name) > 0 else \"\"\n metadata[\"genericName\"] = generic_name\n\n ## dosage form\n dosage_form = self.tree_et.xpath(\".//manufacturedProduct//formCode/@displayName\")\n dosage_form = dosage_form[0] if len(dosage_form) > 0 else \"\"\n metadata[\"dosageForm\"] = dosage_form\n\n # active ingredients\n substance_name = sorted([self.strip_newline_tab(a.text) for a in\n self.tree_et.xpath(\".//.//manufacturedProduct//activeMoiety/activeMoiety/name\")])\n substance_name = \", \".join(set(substance_name))\n metadata[\"substanceName\"] = substance_name\n\n ## inactive ingredients\n inactive_ingredients = sorted([self.strip_newline_tab(inactive.text) for inactive in self.tree_et.xpath(\n \".//manufacturedProduct//inactiveIngredient/inactiveIngredientSubstance/name\")])\n\n if len(inactive_ingredients) == 0:\n inactive_ingredients = \"\"\n else:\n inactive_ingredients = \",\".join(set(inactive_ingredients))\n\n metadata[\"inactiveIngredients\"] = inactive_ingredients\n\n ## other ingredients\n ingredients = sorted([self.strip_newline_tab(ingredient.text) for ingredient in\n 
self.tree_et.xpath(\".//manufacturedProduct//ingredient/ingredientSubstance/name\")])\n\n if len(ingredients) == 0:\n ingredients = \"\"\n else:\n ingredients = \", \".join(set(ingredients))\n metadata[\"ingredients\"] = ingredients\n\n # marketing_category\n marketing_category = self.tree_et.xpath(\".//manufacturedProduct/subjectOf/approval/code/@displayName\")\n marketing_category = self.strip_newline_tab(marketing_category[0]) if len(marketing_category) > 0 else \"\"\n metadata[\"marketingCategory\"] = marketing_category\n\n # consumed in\n consumed_in = self.tree_et.xpath(\n \".//manufacturedProduct//consumedIn/substanceAdministration/routeCode/@displayName\")\n consumed_in = consumed_in[0] if len(consumed_in) > 0 else \"\"\n metadata[\"consumedIn\"] = consumed_in\n\n # revision date\n marketing_date = self.tree_et.xpath(\".//manufacturedProduct//marketingAct/effectiveTime/low/@value\")\n marketing_date = self.__normalize_date(marketing_date)\n metadata[\"marketingDate\"] = marketing_date\n\n return metadata",
"def _get_geoms(self, root, _parent=None):\n # Initialize return array\n geom_pairs = []\n # If the parent exists and this is a geom element, we add this current (parent, element) combo to the output\n if _parent is not None and root.tag == \"geom\":\n geom_pairs.append((_parent, root))\n # Loop through all children elements recursively and add to pairs\n for child in root:\n geom_pairs += self._get_geoms(child, _parent=root)\n # Return all found pairs\n return geom_pairs",
"def parse_journal_article_record(root) -> dict:\n\n # print(\"Root\", root)\n # pmid = root.find(\"PMID\").text\n # print(\"PMID\", pmid)\n # quit()\n\n doc = {\n \"abstract\": \"\",\n \"pmid\": \"\",\n \"title\": \"\",\n \"authors\": [],\n \"pub_date\": \"\",\n \"journal_iso_title\": \"\",\n \"journal_title\": \"\",\n \"doi\": \"\",\n \"compounds\": [],\n \"mesh\": [],\n }\n\n doc[\"pmid\"] = root.xpath(\".//PMID/text()\")[0]\n\n doc[\"title\"] = next(iter(root.xpath(\".//ArticleTitle/text()\")), \"\")\n\n # TODO https:.//stackoverflow.com/questions/4770191/lxml-etree-element-text-doesnt-return-the-entire-text-from-an-element\n atext = next(iter(root.xpath(\".//Abstract/AbstractText/text()\")), \"\")\n\n for abstracttext in root.xpath(\".//Abstract/AbstractText\"):\n abstext = node_text(abstracttext)\n\n label = abstracttext.get(\"Label\", None)\n if label:\n doc[\"abstract\"] += f\"{label}: {abstext}\\n\"\n else:\n doc[\"abstract\"] += f\"{abstext}\\n\"\n\n doc[\"abstract\"] = doc[\"abstract\"].rstrip()\n\n doc[\"authors\"] = []\n for author in root.xpath(\".//Author\"):\n last_name = next(iter(author.xpath(\"LastName/text()\")), \"\")\n first_name = next(iter(author.xpath(\"ForeName/text()\")), \"\")\n initials = next(iter(author.xpath(\"Initials/text()\")), \"\")\n if not first_name and initials:\n first_name = initials\n doc[\"authors\"].append(f\"{last_name}, {first_name}\")\n\n pub_year = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Year/text()\")), None)\n pub_mon = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Month/text()\")), \"Jan\")\n pub_day = next(iter(root.xpath(\".//Journal/JournalIssue/PubDate/Day/text()\")), \"01\")\n medline_date = next(\n iter(root.xpath(\".//Journal/JournalIssue/PubDate/MedlineDate/text()\")), None\n )\n\n pub_date = process_pub_date(doc[\"pmid\"], pub_year, pub_mon, pub_day, medline_date)\n\n doc[\"pub_date\"] = pub_date\n doc[\"journal_title\"] = next(iter(root.xpath(\".//Journal/Title/text()\")), \"\")\n doc[\"joural_iso_title\"] = next(iter(root.xpath(\".//Journal/ISOAbbreviation/text()\")), \"\")\n doc[\"doi\"] = next(iter(root.xpath('.//ArticleId[@IdType=\"doi\"]/text()')), None)\n\n doc[\"compounds\"] = []\n for chem in root.xpath(\".//ChemicalList/Chemical/NameOfSubstance\"):\n chem_id = chem.get(\"UI\")\n doc[\"compounds\"].append({\"id\": f\"MESH:{chem_id}\", \"name\": chem.text})\n\n compounds = [cmpd[\"id\"] for cmpd in doc[\"compounds\"]]\n doc[\"mesh\"] = []\n for mesh in root.xpath(\".//MeshHeading/DescriptorName\"):\n mesh_id = f\"MESH:{mesh.get('UI')}\"\n if mesh_id in compounds:\n continue\n doc[\"mesh\"].append({\"id\": mesh_id, \"name\": mesh.text})\n\n return doc",
"def get_all_tagged(self,tag_name):\n return self.tag2elements[tag_name]",
"def _process_article(el):\n assert len(el) == 1 # We only expect one article record\n el = el[0]\n\n all_tags = [c.tag for c in el.getchildren()]\n title = el.xpath('ArticleTitle')[0].text\n publication_types = [\n pt.text for pt in el.xpath('PublicationTypeList/PublicationType')]\n if all([pt not in DESIRED_PUBLICATION_TYPES for pt in publication_types]\n ) or \\\n any([pt in EXCLUDED_PUBLICATION_TYPES for pt in publication_types]\n ):\n raise PublicationTypeException\n # Only keep the publication types we're interested in\n publication_types = set(\n publication_types).intersection(DESIRED_PUBLICATION_TYPES)\n abstract = el.xpath(\n 'Abstract/AbstractText')[0].text if 'Abstract' in all_tags else None\n return {'title': title,\n 'abstract': abstract,\n 'publication_types': publication_types}",
"def items(self):\r\n return self.elements.values()",
"def return_xml_dict(self):\n\ttry:\n\t self.Correct_MultiRoot_XML()\n\t self.subtitle_dict = sorted(self.xmltodict(self.XML_String)['subtitle'],key=itemgetter('imdb','cd'))\n\t #self.subtitle_dict = self.xmltodict(self.XML_String)['subtitle']\n\t print \"XML subtitle list downloaded and converted to dict\"\n\t return True\n\texcept:\n\t print \"XML subtitle list not downloaded or converterd.\"\n\t return False",
"def parse(self):\n return []",
"def parse_one_xml(xml_file, fields=None):\r\n tree = etree.iterparse(xml_file)\r\n \r\n d = dict.fromkeys(fields)\r\n for event, elem in tree:\r\n if elem.tag == 'SAMPLE_ATTRIBUTE':\r\n for f in fields:\r\n if elem.getchildren()[0].text == f:\r\n d[f] = elem.getchildren()[1].text \r\n return d",
"def results(self) -> Dict[str, Any]:\n return self.nodes",
"def get_attrs(post_content):\n for attribute in post_content.find_all(\"p\", {\"class\": \"attrgroup\"}):\n for attr in attribute.find_all(\"span\"):\n attr_text = attr.text.strip()\n if attr_text:\n yield attr_text.lower()",
"def get_element_pdos(dos,el):\n \n el_dos = {}\n for site, atom_dos in dos.pdos.items(): \n ## .items() return (key,value) pairs\n if site.specie == Element(el):\n for orb, pdos in atom_dos.items():\n if orb not in el_dos:\n el_dos[orb] = pdos\n else:\n el_dos[orb] = add_densities([el_dos[orb], pdos])\n\n return {orb: Dos(dos.efermi, dos.energies, densities)\n for orb, densities in el_dos.items()}",
"def getReferenceDetails(soup):\n refDict = {}\n refs = soup.find_all('edmx:reference')\n for ref in refs:\n includes = ref.find_all('edmx:include')\n for item in includes:\n if item.get('namespace') is None or ref.get('uri') is None:\n rsvLogger.error(\"Reference incorrect for: \", item)\n continue\n if item.get('alias') is not None:\n refDict[item['alias']] = (item['namespace'], ref['uri'])\n else:\n refDict[item['namespace']] = (item['namespace'], ref['uri'])\n refDict[item['namespace'].split('.')[0]] = (item['namespace'], ref['uri'])\n return refDict",
"def get_attributes_from_child(child):\n return [{'element': child,\n 'attribute': x.attrib,\n 'tag': x.tag,\n 'keys': x.keys()} for x in child]",
"def parse_mapping_page(self, id, body):\n info = {}\n info['original'] = self.__re_search(body, *self.regx['original'])\n info['save'] = self.__re_search(body, *self.regx['save'])\n info['price'] = self.__re_search(body, *self.regx['price'])\n info['rebate'] = self.__re_search(body, *self.regx['rebate'])\n return info",
"def process_subtags(element, node):\n \n for tag in element.iter(\"tag\"):\n tag_key = tag.attrib['k']\n tag_val = tag.attrib['v']\n \n # Check for problem characters\n if problemchars.match(tag_key):\n continue\n \n # fix tag 'v' attribute of streetname and postcode\n elif tag_key.startswith(\"addr:\"):\n if not \"address\" in node.keys():\n node[\"address\"] = {}\n addr_key = tag.attrib['k'][len(\"addr:\") : ]\n if lower_colon.match(addr_key):\n continue\n else:\n if tag.attrib['k'] == \"addr:street\":\n fixed_v, change = correct_street_type(tag_val)\n elif tag.attrib['k'] == \"addr:postcode\":\n fixed_v, change = correct_postcode(tag.attrib['v'])\n else:\n fixed_v = tag_val\n if fixed_v != None:\n node[\"address\"][addr_key] = fixed_v\n \n # fix fax and phone number\n elif tag_key == \"fax\" or tag_key == \"phone\":\n fixed_v, chang = correct_number(tag_val)\n node[tag_key] = fixed_v\n \n #fix multiple tag_key confusing. These two tag_key in the list have same meaing, \n #so just keep the latter one in the list and change the former to the latter\n elif tag_key in [ u'应急避难场所疏散人数万人',u'应急避难场所疏散人口万人']:\n node[u'应急避难场所疏散人口万人'] = tag_val\n \n # '疏散人数' and '疏散人数(万)' are two similar tag_key. Inthis way below, we change '疏散人数' to '疏散人数(万)'\n # by doing some math.\n elif tag_key == u'疏散人数':\n node[u'疏散人数(万)'] = str(round(float(tag_val.split()[0].replace(',',''))/10000,2))\n elif tag_val != None:\n node[tag_key] = tag_val\n \n return node",
"def parse(fname):\n\n tree = pbsXml.parse(fname)\n\n if not tree:\n tools.error('failed to parse pbsdump xml file ' + fname)\n return 0\n\n root = tree.getroot()\n\n nodes = dict() # Hold list of nodes\n\n # Iterate on all Node items\n for child in root.findall('Node'):\n # Get node name\n name = child.find('name').text\n\n # Build new entry for the given node\n nodes[name] = dict()\n node = nodes[name]\n matches = GET_NODE_ID.match(name)\n node[id] = int(matches.group(1))\n\n # Collect data\n node['name'] = name\n node['np'] = int(child.find('np').text)\n node['state'] = child.find('state').text\n node['power_state'] = child.find('power_state').text\n data = child.find('jobs')\n if data is not None:\n node['jobs'] = data.text\n else:\n node['jobs'] = None\n\n node['nb_sockets'] = child.find('total_sockets').text\n node['nb_numa_nodes'] = child.find('total_numa_nodes').text\n props = child.find('properties').text\n node['properties'] = props.split(',')\n\n # Get the status entries\n node['status'] = dict()\n data = child.find('status')\n if data is None:\n tools.error('Node ' + name + \" has no status entry! Skipped.\")\n continue\n\n status = data.text\n status_list = status.split(',')\n for entry in status_list:\n data = entry.split('=')\n matches = IS_BYTE_SIZE.match(data[1])\n if matches:\n # Convert whatever size in GB\n data[1] = tools.size_convert(matches.group(1), matches.group(2), 'gb')\n\n # Keep the data\n node['status'][data[0]] = data[1]\n\n return nodes"
] |
[
"0.63759995",
"0.6353038",
"0.620662",
"0.6160831",
"0.6060549",
"0.6026878",
"0.60247046",
"0.5937487",
"0.5935178",
"0.5895591",
"0.5833998",
"0.58304346",
"0.5824379",
"0.58219045",
"0.5707903",
"0.5669245",
"0.5667443",
"0.56660455",
"0.5661833",
"0.5661833",
"0.5661279",
"0.562904",
"0.5626109",
"0.5598012",
"0.558992",
"0.558302",
"0.55548936",
"0.5547756",
"0.5523492",
"0.5521544",
"0.5508013",
"0.5482391",
"0.5474641",
"0.5467026",
"0.546035",
"0.5444682",
"0.54395217",
"0.54169565",
"0.5405097",
"0.54004335",
"0.540019",
"0.5395445",
"0.53929245",
"0.5364471",
"0.5357936",
"0.53542644",
"0.5335221",
"0.53296626",
"0.5327882",
"0.53183645",
"0.53169847",
"0.5315984",
"0.53077614",
"0.5271714",
"0.52644044",
"0.5263715",
"0.526105",
"0.5256422",
"0.52552193",
"0.52539897",
"0.5251073",
"0.5248198",
"0.52456754",
"0.5240326",
"0.52402717",
"0.52374476",
"0.52357745",
"0.52187836",
"0.52187425",
"0.52184474",
"0.5215375",
"0.5202723",
"0.5193679",
"0.51877254",
"0.51791537",
"0.51762235",
"0.51756275",
"0.5175373",
"0.5174338",
"0.51733166",
"0.5165284",
"0.5156484",
"0.5148173",
"0.51477426",
"0.51379347",
"0.5136722",
"0.5133677",
"0.5126253",
"0.51259637",
"0.5122264",
"0.511351",
"0.510984",
"0.51052547",
"0.5095813",
"0.5091032",
"0.5089635",
"0.5084069",
"0.5083835",
"0.50760216",
"0.5075776"
] |
0.6134572
|
4
|
Get the event information for a Betfair market ID.
|
def get_event_info(self, market_id: str) -> Tuple[str, str, str]:
market_filter_ = market_filter(market_ids=[market_id])
event_type = (
self._client
.betting
.list_event_types(filter=market_filter_)[0]
.event_type
.name
)
event = (
self._client
.betting
.list_events(filter=market_filter_)[0]
.event
.name
)
competition = (
self._client
.betting
.list_competitions(filter=market_filter_)[0]
.competition
.name
)
return event_type, event, competition
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_market_info(\n self, market_id: str\n ) -> Tuple[str, datetime, Dict[int, str]]:\n market_filter_ = market_filter(market_ids=[market_id])\n\n market = (\n self._client\n .betting\n .list_market_catalogue(\n filter=market_filter_,\n market_projection=['MARKET_START_TIME', 'RUNNER_DESCRIPTION']\n )[0]\n )\n\n market_name = market.market_name\n market_start_time = market.market_start_time\n\n selections = {}\n for runner in market.runners:\n selections[runner.selection_id] = runner.runner_name\n\n return market_name, market_start_time, selections",
"def get_event(self, eventid):\n return self.s.query(Event).get(eventid)",
"def retrieve(cls, event_id):\n return Event(Requester.get(cls.endpoint + '/' + event_id))",
"def get_event(self, event_id):\n if not event_id:\n return None\n\n return self.service.events().get(calendarId=self.calendar_id, eventId=event_id).execute()",
"def getEventById(self, eventid):\n\n e_id = EventId()\n e_id.setHashed(eventid)\n event = Event.getById(e_id)\n return event.getAsDict()",
"def get_event_details(eventId):\n response = client.query(\n TableName=\"EventsSingleTable\",\n # IndexName='',\n Select=\"ALL_ATTRIBUTES\",\n KeyConditionExpression=\"pk = :pk\",\n ExpressionAttributeValues={\":pk\": eventId},\n )\n\n items = response[\"Items\"]\n\n # Try serializing multiple entities from a single request\n for item in items:\n if item[\"sk\"] == item[\"pk\"]:\n e = Event(**item)\n pprint.pprint(str(e))\n else:\n c = Comment(**item)\n pprint.pprint(str(c))",
"def get_event_info(self, event_id, locale=None):\n req = BFGlobalFactory.create(\"ns1:GetEventsReq\")\n req.eventParentId = event_id\n if locale:\n req.locale = locale\n rsp = self._soapcall(BFGlobalService.getEvents, req)\n if rsp.errorCode not in (GetEventsErrorEnum.OK,\n GetEventsErrorEnum.NO_RESULTS):\n error_code = rsp.errorCode\n if error_code == GetEventsErrorEnum.API_ERROR:\n error_code = rps.header.errorCode\n logger.error(\"{getEvents} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n event_items = rsp.eventItems[0] if rsp.eventItems else []\n event_items = [BFEvent(**{k: v for k, v in evt})\n for evt in event_items if evt]\n market_items = rsp.marketItems[0] if rsp.marketItems else []\n market_items = [MarketSummary(**{k: v for k, v in mi})\n for mi in market_items if mi]\n coupon_links = rsp.couponLinks[0] if rsp.couponLinks else []\n coupon_links = [CouponLink(**{k: v for k, v in cl})\n for cl in coupon_links if cl]\n rsp = EventInfo(event_items, rsp.eventParentId, market_items,\n coupon_links)\n return rsp",
"def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event = self.session.query(Event).filter_by(id=id).scalar()\n if not event:\n raise exc.NotFound(\"No such Event {} found\".format(id))\n\n json = event.to_dict(base_uri=self.href_prefix, expand=expand)\n\n self.success(json)",
"def get_event_by_id(event_id):\n db = get_db()\n return db.execute((\n 'SELECT id, name, start_time, end_time, location '\n 'FROM event WHERE id=?'),\n (event_id,)).fetchone()",
"def get_market_info_lite(self, market_id):\n req = BFExchangeFactory.create(\"ns1:GetMarketInfoReq\")\n req.marketId = market_id\n rsp = self._soapcall(BFExchangeService.getMarketInfo, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code != GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarketInfo} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n info = MarketInfoLite(**{k: v for k, v in rsp.marketLite})\n return info",
"def get_event(self):\n return self.keys.events.get()",
"def get_event(self, param):\n\n if param is None:\n return None\n if isinstance(param, str):\n url = self.build_url(\n self._endpoints.get('get_event').format(id=self.calendar_id,\n ide=param))\n params = None\n by_id = True\n else:\n url = self.build_url(\n self._endpoints.get('get_events').format(id=self.calendar_id))\n params = {'$top': 1}\n params.update(param.as_params())\n by_id = False\n\n response = self.con.get(url, params=params,\n headers={'Prefer': 'outlook.timezone=\"UTC\"'})\n if not response:\n return None\n\n if by_id:\n event = response.json()\n else:\n event = response.json().get('value', [])\n if event:\n event = event[0]\n else:\n return None\n return self.event_constructor(parent=self,\n **{self._cloud_data_key: event})",
"def get_event_information(username, name_event):\n config = configparser.ConfigParser() # Use to access to the config file\n config.read('config.ini')\n\n try:\n w3 = Web3(Web3.HTTPProvider(config[username][\"address_node\"]))\n except Exception as e:\n return None, None, e\n\n w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\n if w3.isConnected():\n print(\"Connected to the blockchain.\")\n w3.eth.defaultAccount = w3.eth.accounts[0] # Set the sender\n\n address_event, abi_event = get_address_abi(name_event, \"event\")\n event = w3.eth.contract(address=address_event, abi=abi_event)\n\n try:\n date_event = event.functions.getDate().call()\n available_seats_event = event.functions.getAvailableSeats().call()\n seats_price = event.functions.getSeatsPrice().call()\n\n artist_event = event.functions.getArtist().call()\n location_event = event.functions.getLocation().call()\n description_event = event.functions.getDescription().call()\n except Exception as e:\n return None, None, None, None, None, None, e\n\n return date_event, available_seats_event, seats_price, artist_event, location_event, description_event, None",
"def info_event_json(event_id):\n event = Event.query.filter_by(id=event_id).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)",
"def get_event_details(session, event_id):\n page = session.get('http://www.usacycling.org/results/index.php?ajax=1&act=infoid&info_id='+str(event_id), headers=HDRS)\n # print(page.text)\n event = json_text(page)\n if \"No results found.\" not in event:\n #print('Race is: {} and race name is: {}'.format(e, race.find('h3').getText()))\n info = {'name': ''.join(t for t in event.find('h3').find_all(text=True)), 'races':{}}\n for a in event.find_all('li'):\n info['races'][a.find('a').contents[0]] = a.get('id').split()\n return info\n else:\n return None",
"def GetEventIdentifier(self):\n return self._event_identifier",
"def quote_endpoint(self, market_id):\n self._wait_before_call()\n market = self._format_market_id(market_id)\n try:\n data, meta_data = self.TS.get_quote_endpoint(\n symbol=market, outputsize=\"full\"\n )\n return data\n except:\n logging.error(\"AlphaVantage wrong api call for {}\".format(market))\n return None",
"def query_event_by_id():\n try:\n event_id = request.args['event_id']\n response = requests.put(app.config['EVENTS_ENDPOINT'] + event_id)\n if response.status_code == 200:\n return render_template(\n 'search_results.html',\n auth=is_organizer(get_user()),\n events=parse_events(response.json()),\n app_config=app.config\n )\n else:\n return 'Unable to retrieve events', 500\n except BadRequestKeyError as error:\n return f'Error: {error}.', 400",
"def get_event(event_id):\n try:\n return Event.objects.get(id=event_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no event with id={}.'.format(event_id))",
"def select_event(self, event_id):\n with self.conn:\n self.c.execute(\n \"\"\"SELECT * FROM {table} WHERE {event} = ?\"\"\".format(\n table=TABLE, event=EVENT\n ),\n (event_id,),\n )\n return self.c.fetchone()",
"def event_id(self):\n return self._event_id",
"def get_event_eid(eid):\n return EventModel.query.get_or_404(eid)",
"def event_get(tenant_id, user_id=None):",
"def get_event(self):\r\n return self.events[0]",
"def get_market_info(self, market_id, lite=True, coupon_links=False,\n locale=None):\n req = BFExchangeFactory.create(\"ns1:GetMarketReq\")\n req.marketId = market_id\n req.includeCouponLinks = coupon_links\n if locale:\n req.locale = locale\n rsp = self._soapcall(BFExchangeService.getMarket, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code == GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarket} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n market = rsp.market\n coupons = market.couponLinks[0] if market.couponLinks else []\n coupons = [CouponLink(**{k: v for k, v in coupon})\n for coupon in coupons if coupon]\n runners = market.runners[0] if market.runners else []\n runners = [Runner(**{k: v for k, v in runner})\n for runner in runners if runner]\n hierarchies = market.eventHierarchy[0] if market.eventHierarchy else []\n hierarchies = [evt for evt in hierarchies]\n rsp = MarketInfo(**{k: v for k, v in market})\n info.eventHierarchy = hierarchies\n rsp.couponLinks = coupons\n rsp.runners = runners\n return rsp",
"def event(self, event_id):\r\n return e.Event(self, event_id)",
"def on_market_info(self):\n pass",
"def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp",
"def get_event(self, event_id):\n mask = \"\"\"mask[\n acknowledgedFlag,\n attachments,\n impactedResources,\n statusCode,\n updates,\n notificationOccurrenceEventType]\n \"\"\"\n return self.client.call('Notification_Occurrence_Event', 'getObject', id=event_id, mask=mask)",
"def get_event():\n data = _get_process_detail_expanded_data()[\"event\"]\n return data",
"def get(self, eventId):\n event = EventDao().get_by_id(event_id=eventId)\n event_dict = event.to_dict_view()\n return event_dict",
"def getId(self):\n return _libsbml.Event_getId(self)",
"def get_api_event(self):\n pass",
"def event(self):\n return self.get('callback_id')",
"def get_one(self, message_id):\r\n event_filter = storage.EventFilter(message_id=message_id)\r\n events = [event for event\r\n in pecan.request.storage_conn.get_events(event_filter)]\r\n if not events:\r\n raise EntityNotFound(_(\"Event\"), message_id)\r\n\r\n if len(events) > 1:\r\n LOG.error(_(\"More than one event with \"\r\n \"id %s returned from storage driver\") % message_id)\r\n\r\n event = events[0]\r\n\r\n return Event(message_id=event.message_id,\r\n event_type=event.event_type,\r\n generated=event.generated,\r\n traits=event.traits)",
"def get(self, request):\n return self.serviceHandler.getEvent(request.data)",
"def event(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"event\")",
"def eventdetails(http_request, event_id=0):\n\te = get_object_or_404(Event, pk=event_id)\n\tweather = list(Weather.objects.filter(day=e.edate).filter(zip=e.zip))\n\tif len(weather) == 0:\n\t\tw = None\n\telse:\n\t\tw = weather[0]\n\treturn render_to_response('event_detail.html', {'event': e,\n\t\t\t\t\t\t\t'w': w })",
"async def get_event(\n self,\n event_id: str,\n redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.redact,\n get_prev_content: bool = False,\n allow_rejected: bool = False,\n allow_none: bool = False,\n check_room_id: Optional[str] = None,\n ) -> Optional[EventBase]:\n if not isinstance(event_id, str):\n raise TypeError(\"Invalid event event_id %r\" % (event_id,))\n\n events = await self.get_events_as_list(\n [event_id],\n redact_behaviour=redact_behaviour,\n get_prev_content=get_prev_content,\n allow_rejected=allow_rejected,\n )\n\n event = events[0] if events else None\n\n if event is not None and check_room_id is not None:\n if event.room_id != check_room_id:\n event = None\n\n if event is None and not allow_none:\n raise NotFoundError(\"Could not find event %s\" % (event_id,))\n\n return event",
"def event(self):\n return self.events[0]",
"def to_event(self):\n return f'{self.gem_pack_id},{self.event_time_in_milliseconds},{self.price}\\n'",
"def get_historic_data(self):\n\n historic_market_events = []\n\n return historic_market_events",
"def _callEventGetAll(self, callback_id, event_name):\n return self._event_client.eventGetAll(callback_id, event_name)",
"def query_bid_price(market_data):\n print(\"Consultando BID\")\n if market_data[\"marketData\"][\"BI\"]:\n bid_price = market_data[\"marketData\"][\"BI\"][0][\"price\"]\n print(f\"Precio de BID: ${bid_price:,.2f}\".replace('.', ','))\n return bid_price\n print(\"No hay BIDs activos\")\n return None",
"def apigw_event():\n with open(\"events/event.json\") as json_file:\n return json.load(json_file)",
"def fusion_api_get_events(self, uri=None, param='', api=None, headers=None):\n return self.event.get(uri=uri, api=api, headers=headers, param=param)",
"def get(self, request, project, event_id):\n\n use_snuba = options.get('snuba.events-queries.enabled')\n\n event_cls = event_cls = SnubaEvent if use_snuba else Event\n\n event = event_cls.objects.from_event_id(event_id, project.id)\n if event is None:\n return Response({'detail': 'Event not found'}, status=404)\n\n # populate event data\n Event.objects.bind_nodes([event], 'data')\n\n try:\n committers = get_serialized_event_file_committers(\n project,\n event,\n frame_limit=int(request.GET.get('frameLimit', 25)),\n )\n except Release.DoesNotExist:\n return Response({'detail': 'Release not found'}, status=404)\n except Commit.DoesNotExist:\n return Response({'detail': 'No Commits found for Release'}, status=404)\n\n # XXX(dcramer): this data is unused, so lets not bother returning it for now\n # serialize the commit objects\n # serialized_annotated_frames = [\n # {\n # 'frame': frame['frame'],\n # 'commits': serialize(frame['commits'])\n # } for frame in annotated_frames\n # ]\n\n data = {\n 'committers': committers,\n # 'annotatedFrames': serialized_annotated_frames\n }\n return Response(data)",
"def getevent(self, filename):\n return self.events[filename.lower()]",
"def current_events(service, calander_id):\n event = service.events().get(calendarId='teamtwotesting@gmail.com', eventId=calander_id).execute()\n return event",
"def getEventIDName(*args, **kwargs):\n pass",
"def event_message(iden: int, event: Any) -> dict[str, Any]:\n return {\"id\": iden, \"type\": \"event\", \"event\": event}",
"def id(self) -> str:\n return self._event.get('id')",
"def get_events(events_id):\n # Filter events matching events_id and select the first one found\n events = Events.query.filter_by(id=events_id).first()\n # If no events matches album_id, respond HTTP 404\n if events is None:\n abort(404)\n # Serialize the album as a JSON object and return it\n schema = EventsSchema()\n return jsonify(schema.dump(events))",
"def getEvent(number):",
"def getEvent(self):\n year, month, day = self.date\n event = Event()\n event.add(\"summary\", \"%s release\" % (self.dict[\"name\"]))\n event.add(\"uid\", \"http://www.freebase.com/view/guid/%s\" % (self.dict['guid'][1:]))\n event.add(\"dtstart\", \"%04d%02d%02d\" % (year,month,day), encode=0)\n return event",
"def get_event(self, instance, feed=None):\n if feed is None:\n feed = self.feed\n if self.client is None:\n self.get_client()\n event_id = CalendarEvent.objects.get_event_id(instance, feed)\n try:\n event = self.client.events().get(calendarId=feed, eventId=event_id).execute()\n except Exception:\n event = None\n return event",
"def getevent(self, name):\n return self.events[name.lower()]",
"def get_market_data(self) -> dict:\n return MarketData(asks=self.get_orders_by_action(OrderAction.SELL, self.deep.ask_count),\n bids=self.get_orders_by_action(OrderAction.BUY, self.deep.bid_count)).format",
"def get_event():\n json_data = request.args or {}\n return make_response(jsonify({ \"data\" : Event.get_events(json_data)}))",
"def getPlugEventId(self, pid, ename):\n for event in self._events.values():\n if event.name == ename and event.pid == pid: \n return event.ID\n return None",
"def event(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"event\")",
"def event(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"event\")",
"def get(self, id):\n offset, limit, expand = self.get_pagination_values()\n event_type = (\n self.session.query(EventType).filter_by(id=id).scalar()\n )\n if not event_type:\n raise exc.NotFound(\"No such EventType {} found\".format(id))\n\n json = event_type.to_dict(self.href_prefix)\n json[\"limit\"] = limit\n json[\"offset\"] = offset\n\n # We will perform expansion of events here b/c we want to apply\n # limits and offsets\n events = []\n for event in (\n event_type.get_latest_events().limit(limit).offset(offset)\n .from_self().order_by(Event.timestamp).all()\n ):\n if \"events\" in expand:\n events.append(\n event.to_dict(\n base_uri=self.href_prefix, expand=set(expand)\n )\n )\n else:\n events.append({\n \"id\": event.id, \"href\": event.href(self.href_prefix)\n })\n json[\"events\"] = events\n\n self.success(json)",
"def get_game_events(game_id):\n # get relevant information from game id\n year, month, day, rest = game_id.split('_', 3)\n # file\n filename = \"gameday-data/year_%s/month_%s/day_%s/gid_%s/game_events.xml\" % (year, month, day, game_id)\n file = os.path.join(os.path.dirname(__file__), filename)\n # check if file exits\n if os.path.isfile(file):\n data = file\n else:\n # get data if file does not exist\n try:\n data = urlopen(\"http://gd2.mlb.com/components/game/mlb/year_%s/month_%s/day_%s/gid_%s/game_events.xml\" % (year, month, day, game_id))\n except HTTPError:\n raise ValueError(\"Could not find a game with that id.\")\n return data",
"def get_event_times(event_id):\r\n r = requests.get(base_url + f'check-ins/v2/events/{event_id}/event_periods?include=event_times&per_page=1', headers=headers3).json()\r\n event_period_id = r['data'][0]['id']\r\n event_times = r['included']\r\n # Map the event time to even time id\r\n event_time_to_id = {time['attributes']['starts_at']:time['id'] for time in event_times}\r\n return event_period_id, event_time_to_id",
"def GetEventDataIdentifier(self):\n return self._event_data_identifier",
"def get_auction(request):\n db = request.registry.db\n tender_id = request.matchdict['tender_id']\n tender = TenderDocument.load(db, tender_id)\n if not tender:\n request.errors.add('url', 'tender_id', 'Not Found')\n request.errors.status = 404\n return\n auction_info = tender.serialize(\"auction\")\n return {'data': auction_info}",
"def getElementBySId(self, *args):\n return _libsbml.Event_getElementBySId(self, *args)",
"def returnTicker(self):\n ticker = {}\n t = self.dpay.rpc.get_ticker(api=\"market_history\")\n ticker = {'highest_bid': float(t['highest_bid']),\n 'latest': float(t[\"latest\"]),\n 'lowest_ask': float(t[\"lowest_ask\"]),\n 'percent_change': float(t[\"percent_change\"]),\n 'bbd_volume': t[\"bbd_volume\"],\n 'dpay_volume': t[\"dpay_volume\"]}\n return ticker",
"def _handleRequestEvseId(self, data):\r\n print(\"\\\"Request EVSE ID\\\" received\")\r\n message = self.whitebeet.v2gParseRequestEvseId(data)\r\n if message['format'] == 0:\r\n print(\"No EVSE ID available\")\r\n try:\r\n self.whitebeet.v2gSetEvseId(None)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))\r\n else:\r\n evseid = \"DE*ABC*E*00001*01\"\r\n print(\"Set EVSE ID: {}\".format(evseid))\r\n try:\r\n self.whitebeet.v2gSetEvseId(evseid)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))",
"def get_events():\n url = app.config['EVENTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_events(response.json())\n raise RuntimeError('Error in retrieving events.')",
"def game_events(game_id):\n # get data from data module\n data = mlbgame.data.get_game_events(game_id)\n # parse XML\n parsed = etree.parse(data)\n root = parsed.getroot()\n # empty output file\n output = {}\n # loop through innings\n innings = root.findall('inning')\n for x in innings:\n output[x.attrib['num']] = {\n 'top': __inning_info(x, 'top'),\n 'bottom': __inning_info(x, 'bottom')\n }\n return output",
"def market_info(self, symbol):\n r = requests.get(self.base_url + f'/game/locations/{symbol}/marketplace', headers = self.auth_header)\n return r.text",
"def __repr__(self):\n return '{}'.format(self._event.get('id'))",
"def fetch_email_status_by_message_id(cls, message_id: str):\n result = cls.mailjet_retrieve.messagehistory.get(id=message_id).json()\n if len(result[\"Data\"]) == 0:\n return None\n recent_event = result[\"Data\"][-1]\n return recent_event",
"def getMyMarketOrders(self, empireID):\n d = {}\n marketOrdersDict = self.getMyDictInfo('marketOrders')\n for marketID, myMarketOrderDict in marketOrdersDict.iteritems():\n if self.systems[myMarketOrderDict['system']].myEmpire.id == empireID:\n d[marketID] = myMarketOrderDict\n return d",
"def get_one(self, id):\n rpc_ilog = objects.event_log.get_by_uuid(\n pecan.request.context, id)\n\n return EventLog.convert_with_links(rpc_ilog)",
"def public_market_data_feed(config, state):\n\n # Sleep until the next market event\n while not state.stopper.is_set():\n\n state.lock.acquire()\n while not state.event_queue.empty():\n\n # Get next event\n event = state.event_queue.get()\n\n # TODO: ugly\n if isinstance(event, dict):\n symbol = event['instrument']\n message_type = event['message-type']\n else:\n symbol = event.instrument\n message_type = event.message_type\n\n for client in state.get_market_data_clients():\n if client.handshaken and client.snapshot_sent:\n subscriptions = client.subscriptions\n if symbol in subscriptions:\n topics = client.subscriptions[symbol]\n if message_type in ['A', 'X', 'M']:\n if 'orderBookL2' in topics:\n if not isinstance(event, dict):\n message = event.get_message()\n messaging.send_data(client.socket, message, client.encoding)\n else:\n message = json.dumps(event)\n messaging.send_data(client.socket, message, client.encoding)\n\n elif message_type in ['E']:\n if 'trade' in topics:\n if not isinstance(event, dict):\n message = event.get_message()\n messaging.send_data(client.socket, message, client.encoding)\n else:\n message = json.dumps(event)\n messaging.send_data(client.socket, message, client.encoding)\n\n state.get_current_lob_state(event['instrument']).print()\n\n state.lock.release()\n\n print('Market data dispatching stopped.')",
"def package_specific_event(event_id, format):\n event = Event.query.filter_by(id=event_id).first_or_404()\n return generate_event_package(event, format)",
"def test_get_event(self):\n event = Event(self.client, 123, {})\n\n self.assertEqual(event.action, \"ticket_create\")\n self.assertEqual(event.created, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(event.duration, 300.56)\n self.assertIsNotNone(event.entity)\n self.assertEqual(event.id, 123)\n self.assertEqual(event.message, \"None\")\n self.assertIsNone(event.percent_complete)\n self.assertIsNone(event.rate)\n self.assertTrue(event.read)\n self.assertIsNotNone(event.secondary_entity)\n self.assertTrue(event.seen)\n self.assertIsNone(event.status)\n self.assertIsNone(event.time_remaining)\n self.assertEqual(event.username, \"exampleUser\")",
"def _GetEventTagByIdentifier(self, event_identifier):\n if not self._event_tag_index:\n self._BuildEventTagIndex()\n\n lookup_key = event_identifier.CopyToString()\n event_tag_identifier = self._event_tag_index.get(lookup_key, None)\n if not event_tag_identifier:\n return\n\n return self._GetEventTag(\n event_tag_identifier.stream_number,\n entry_index=event_tag_identifier.entry_index)",
"def event(id):\n form = ContactForm()\n event = Event.query.get_or_404(id)\n other_media = {\"video\": event.video, \"misc_image_paths\": event.misc_images()}\n packages = event.packages.all()\n # commented out because the fake data generated for the demo of\n # this app by the Faker package may inadvertently contain real email addresses\n if form.validate_on_submit():\n # send_email(\n # organizer.email,\n # f\"Event Inquiry - {form.subject.data}\",\n # \"events/email/contact_organizer\",\n # organizer=organizer,\n # form=form,\n # event=event,\n # )\n flash(\"Your email was sent to the event organizer.\", \"success\")\n return redirect(url_for(\"events.event\", id=id))\n return render_template(\n \"events/event.html\",\n event=event,\n venue=event.venue,\n organizer=event.user,\n packages=packages,\n form=form,\n date_format=\"%m/%d/%Y\",\n main_image=event.main_image(),\n time_format=\"%I:%M %p\",\n other_media=other_media,\n )",
"def get_event():\n\t#Get HTTP query args.\n\tno = request.args.get('no')\n\tfilename = request.args.get('filename')\n\tcollection = mongo.db[filename]\n\n\tjsonEncoder = hepmcio_json.HepMCJSONEncoder()\n\thepMCDecoder = hepmcio_json.HepMCJSONDecoder()\n\tjsonDecoder = json.JSONDecoder()\n\t#Everything below same as in the Visualiser view.\n\tevent = collection.find_one({\"type\":\"event\", \"no\":int(no)}, {\"_id\":False})\n\tparticleJson = collection.find({\"type\":\"particle\", \"event\":event[\"barcode\"]}, {\"_id\":False})\n\tparticles = []\n\tfor particle in particleJson:\n\t\tparticles.append(jsonEncoder.encode(particle))\n\tvertices = []\n\tvertexJson = collection.find({\"type\":\"vertex\", \"event\":event[\"barcode\"]}, {\"_id\":False})\n\tfor vertex in vertexJson:\n\t\tvertices.append(jsonEncoder.encode(vertex))\n\tevent = jsonEncoder.encode(event)\n\n\teventObject = hepmcio_json.EventJSONObject(event, particles, vertices)\n\t\n\tdecodedEvent = hepMCDecoder.decode(eventObject)\n\n\tPT_CUTOFF = 0.0\n\tintParticles = [particle for particle in decodedEvent.particles.values() if particle.status!=1 and \\\n\t\tparticle.mom[0]**2 + particle.mom[1]**2 > PT_CUTOFF**2]\n\t\n\tintParticleAncestors = reduce(operator.concat, [hepmcio.get_ancestors(particle)[:-1] for particle in intParticles])\n\n\tparticles = []\n\tfor particle in (intParticles + intParticleAncestors):\n\t\tparticles.append(jsonDecoder.decode(jsonEncoder.encode(particle)))\n\t\n\tvertices = list(map(jsonDecoder.decode, vertices))\n\t\n\treturn {\"particles\":jsonEncoder.encode(particles), \"vertices\":jsonEncoder.encode(vertices)}",
"def add_event_from_info(db, event_info, event_id, tag):\n\n if 'description' not in event_info.keys():\n return False\n\n if len(event_info['description']) < MIN_CHARS_DESC:\n if VERBOSE:\n print('Failure: event description too short \\\n (>={} chars needed)'.format(MIN_CHARS_DESC))\n return False\n\n if 'name' in event_info.keys():\n ename = event_info['name']\n else:\n ename = None\n\n if 'venue' in event_info.keys():\n if 'name' in event_info['venue'].keys() and event_info['venue']['name']:\n lname = event_info['venue']['name']\n else:\n lname = None\n\n if 'lon' in event_info['venue'].keys() and event_info['venue']['lon']:\n lon = event_info['venue']['lon']\n else:\n lon = None\n\n if 'lat' in event_info['venue'].keys() and event_info['venue']['lat']:\n lat = event_info['venue']['lat']\n else:\n lat = None\n\n if 'address_1' in event_info['venue'].keys() \\\n and event_info['venue']['address_1']:\n address_1 = event_info['venue']['address_1']\n else:\n address_1 = None\n\n if 'zip' in event_info['venue'].keys() and event_info['venue']['zip']:\n zipcode = event_info['venue']['zip']\n else:\n zipcode = None\n\n if 'city' in event_info['venue'].keys() and event_info['venue']['city']:\n city = event_info['venue']['city']\n else:\n city = None\n\n if 'state' in event_info['venue'].keys() \\\n and event_info['venue']['state']:\n state = event_info['venue']['state']\n else:\n state = None\n else:\n lname = lon = lat = address_1 = zipcode = city = state = None\n\n if 'time' in event_info.keys() and event_info['time']:\n start_time = event_info['time']\n else:\n start_time = None\n\n if 'duration' in event_info.keys() and event_info['duration']:\n duration = event_info['duration']\n else:\n duration = None\n\n if 'description' in event_info.keys() and event_info['description']:\n description = event_info['description']\n else:\n description = None\n\n # taglist = []\n # for t in TAGS:\n # if t in description.lower() or t in ename.lower():\n # taglist.append(t)\n #\n # if len(taglist) > 0:\n # print(ename, taglist)\n # else:\n # return\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE mid = %s\n \"\"\",\n (event_id, ))\n\n result = cursor.fetchone()\n\n if result:\n print('Event already in database.')\n return\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE ename = %s\n \"\"\",\n (ename, ))\n if result:\n print('Event already in database.')\n return\n\n loc_query = \\\n \"\"\"\n INSERT\n INTO Locations(lname, lat, lon, address_1, zip, city, state)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n cursor.execute(loc_query, (\n lname,\n lon,\n lat,\n address_1,\n zipcode,\n city,\n state,\n ))\n\n db.commit()\n\n print('Inserted into Locations.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n lid = cursor.fetchone()\n\n start_date = str(datetime.fromtimestamp(start_time / 1000))\n\n if start_date and duration:\n end_date = str(datetime.fromtimestamp((start_time + duration) / 1000))\n else:\n end_date = None\n\n ev_query = \\\n \"\"\"\n INSERT\n INTO Events(ename, start_date, end_date,\n num_attending, lid, description, mid)\n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n\n cursor.execute(ev_query, (\n ename.encode('ascii', 'ignore'),\n start_date,\n end_date,\n 0,\n lid,\n description.encode('ascii', 'ignore'),\n event_id,\n ))\n\n db.commit()\n\n print('Inserted into Events.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n eid = cursor.fetchone()\n\n # for tag in taglist:\n # category = None\n # for c in CATEGORIES:\n # if tag in 
CATEGORIES[c]:\n # category = c\n\n et_query = \\\n \"\"\"\n INSERT\n INTO EventTags(eid, tag, category)\n VALUES (%s, %s, %s)\n \"\"\"\n\n cursor.execute(et_query, (eid, tag, tag))\n\n db.commit()\n\n print('Inserted into EventTags.')\n\n if VERBOSE:\n print('Finished.')\n return True",
"def getEvent(self):\n\n event = {\n \"summary\": \"H1 \" + self.summary,\n \"location\": LOCATION,\n # The H1 tag \"classifies\" event as a shift\n \"description\": self.description + \"\\n\\nH1 Communication arbetspass\",\n \"start\": {\n \"dateTime\": \"{0}T{1}{2}\".format(self.start.date(), self.start.time(), self.offset)\n },\n \"end\": {\n \"dateTime\": \"{0}T{1}{2}\".format(self.end.date(), self.end.time(), self.offset)\n },\n \"reminders\": {\n \"useDefault\": False,\n \"overrides\": [\n {\n \"method\": \"popup\",\n \"minutes\": 720 # 12 hours\n },\n {\n \"method\": \"popup\",\n \"minutes\": 5 # 12 hours\n }\n ]\n },\n \"colorId\": googleCalendar.EVENT_COLORIDS[\"yellow\"]\n\n }\n return event",
"def GetEventDataByIdentifier(self, identifier):\n return self._GetAttributeContainerWithCache(\n 'event_data', identifier.stream_number,\n entry_index=identifier.entry_index)",
"def get_event_response(network=None, station=None, channel=None,\n eventid=None,starttime=None, endtime=None):\n baseurl = 'http://rdsa.knmi.nl/fdsnws/dataselect/1/query'\n payload = {'starttime':starttime,'endtime':endtime,'network':network,\n 'station':station, 'channel':channel,'nodata':404}\n r = requests.get(baseurl,params=payload)\n print('http response received')\n if r.status_code ==200:\n return {'data':r.content,'starttime':starttime,'endtime':endtime\n ,'channel':channel,'station':station, '_id':eventid}",
"def get(self, case_number, event_id):\n return self._connection.get(\n u\"{}/{}\".format(self._uri_prefix.format(case_number), event_id)\n )",
"def test_event_id(self):\n result = self.test_client.event_id\n\n assert result == \"2130389\"",
"def get_eventhub_info(self):\n self._create_connection()\n eh_name = self.address.path.lstrip('/')\n target = \"amqps://{}/{}\".format(self.address.hostname, eh_name)\n mgmt_client = uamqp.AMQPClient(target, auth=self.auth, debug=self.debug)\n mgmt_client.open(self.connection)\n try:\n mgmt_msg = Message(application_properties={'name': eh_name})\n response = mgmt_client.mgmt_request(\n mgmt_msg,\n constants.READ_OPERATION,\n op_type=b'com.microsoft:eventhub',\n status_code_field=b'status-code',\n description_fields=b'status-description')\n eh_info = response.get_data()\n output = {}\n if eh_info:\n output['name'] = eh_info[b'name'].decode('utf-8')\n output['type'] = eh_info[b'type'].decode('utf-8')\n output['created_at'] = datetime.datetime.fromtimestamp(float(eh_info[b'created_at'])/1000)\n output['partition_count'] = eh_info[b'partition_count']\n output['partition_ids'] = [p.decode('utf-8') for p in eh_info[b'partition_ids']]\n return output\n except:\n raise\n finally:\n mgmt_client.close()",
"def get(self, request, group):\n event = group.get_latest_event()\n\n try:\n return client.get('/events/{}/'.format(event.id), request.user, request.auth)\n except client.ApiError as e:\n return Response(e.body, status=e.status)",
"def getElementByMetaId(self, *args):\n return _libsbml.Event_getElementByMetaId(self, *args)",
"def event_query(timestamp):\n graphql_client = GraphQLClient('https://api.thegraph.com/subgraphs/name/miracle2k/all-the-keeps')\n members = GqlQuery().fields(['address']).query('members').generate()\n bondedECDSAKeep = GqlQuery().fields([members]).query('bondedECDSAKeep').generate()\n deposit = GqlQuery().fields(['id', bondedECDSAKeep]).query('deposit').generate()\n\n queries = []\n for event in event_queries.values():\n queries.append(GqlQuery()\n .fields(['id', 'timestamp', deposit])\n .query(event, input={'where': '{timestamp_gt: ' + str(timestamp) + '}'})\n .generate())\n final_query = GqlQuery().fields(queries).generate()\n\n result = jsonpickle.decode(graphql_client.execute(final_query))\n final_events = {}\n for event_type, events in result['data'].items():\n if not events:\n continue\n final_events.update({event_type: events_parser(events)})\n return final_events",
"def get(self, eventId, uid):\n raise NotImplementedError",
"async def get_current_event(self) -> tuple[Event | None, list[Event]]:\n utc_now = datetime.now(tz=UTC)\n log.debug(f\"Finding active event for: {utc_now}.\")\n\n # Construct an object in the arbitrary year for the purpose of comparison.\n lookup_now = date(year=ARBITRARY_YEAR, month=utc_now.month, day=utc_now.day)\n log.trace(f\"Lookup object in arbitrary year: {lookup_now}.\")\n\n available_events = await self.get_events()\n log.trace(f\"Found {len(available_events)} available events.\")\n\n for event in available_events:\n meta = event.meta\n if not meta.is_fallback and (meta.start_date <= lookup_now <= meta.end_date):\n return event, available_events\n\n log.trace(\"No active event found. Looking for fallback event.\")\n\n for event in available_events:\n if event.meta.is_fallback:\n return event, available_events\n\n log.warning(\"No event is currently active and no fallback event was found!\")\n return None, available_events",
"def apigw_event():\n\n return {\n \"queryStringParameters\": {\n \"snotel_site\": \"322:CO:SNTL\",\n \"days\": \"30\",\n \"element_code\": \"WDIRV\"\n }\n }",
"def get(self, bike_id=None):\n if bike_id is None:\n raise ClientDataError('bike_id is required')\n bike = Bike.query.get_or_404(bike_id)\n if bike.user_id != g.user.id:\n raise ClientDataError('Bike with id {} not found for user {}'.format(bike_id, g.user.id),\n status_code=403)\n event_type = request.args.get('type', '')\n if event_type == '':\n raise ClientDataError('Must include event type in query string')\n events = sorted(bike.maintenance_events, key=lambda x: x.date, reverse=True)\n last_event_date = None\n for event in events:\n if event.description == event_type:\n last_event_date = event.date\n break\n try:\n if last_event_date is None:\n # never happened. return total miles and note\n bike_purchase_date = bike.purchased_at\n miles = get_total_miles_from_date(bike_purchase_date)\n event_found = False\n else:\n miles = get_total_miles_from_date(last_event_date)\n event_found = True\n except StravaApiError as e:\n app.logger.error('Error getting miles: {}'.format(e.message))\n raise StravaApiError()\n\n return {'miles': miles, 'event_found': event_found}, 200",
"def access_event(self, args):\n\t\tr = requests.get(self.github+\"repos/\"+args.org+\"/\"+args.repo+\"/\"+args.event_type)\n\t\tprint(r.headers)\n\t\tprint(type(str(r.headers)))\n\t\t#self.save_to_file(str(r.headers))",
"def get_events(competition_id: int = None,\n season_id: int = None,\n match_id: int = None,\n url=base_url):\n\n assert competition_id != match_id,\\\n \"Set either (competition_id, season_id) or match_id\"\n\n match_url = base_url + \"events/{}.json\"\n\n if match_id is None:\n matches = get_matches(competition_id, season_id, base_url)\n match_ids = [m['match_id'] for m in matches]\n else:\n match_ids = [match_id]\n\n events = []\n for match_id in match_ids:\n for e in requests.get(url=match_url.format(match_id)).json():\n events.append(e)\n\n return events",
"def query_market_data(self, kind_of_price):\n market_data = pyRofex.get_market_data(\n ticker=self.symbol,\n entries=[kind_of_price]\n )\n return market_data"
] |
[
"0.6362536",
"0.63047844",
"0.61040074",
"0.59841615",
"0.5915983",
"0.58161205",
"0.5800413",
"0.5745946",
"0.5725486",
"0.5675717",
"0.56645864",
"0.5656346",
"0.5621131",
"0.55494666",
"0.55292183",
"0.5473335",
"0.5443154",
"0.5440933",
"0.5433231",
"0.5430904",
"0.5429236",
"0.541626",
"0.5402026",
"0.5395631",
"0.53843874",
"0.53761035",
"0.5368661",
"0.5367785",
"0.5367226",
"0.5325093",
"0.53164464",
"0.5275684",
"0.5254406",
"0.52359015",
"0.5225816",
"0.5225238",
"0.5216811",
"0.52128303",
"0.5203103",
"0.5184571",
"0.5160984",
"0.5151237",
"0.5140127",
"0.5137163",
"0.5132927",
"0.512747",
"0.50994706",
"0.50977045",
"0.5086737",
"0.5085944",
"0.5084156",
"0.5083905",
"0.50651485",
"0.5046851",
"0.500787",
"0.50077516",
"0.49833426",
"0.49820504",
"0.49741963",
"0.49715957",
"0.49603412",
"0.49603412",
"0.49548867",
"0.49548775",
"0.49534637",
"0.494016",
"0.49270242",
"0.4918712",
"0.49055168",
"0.4897736",
"0.48858562",
"0.48788038",
"0.48766893",
"0.48741215",
"0.48709863",
"0.48684287",
"0.4866541",
"0.48648545",
"0.48579684",
"0.4855241",
"0.4852936",
"0.48429045",
"0.4833178",
"0.48299655",
"0.48286128",
"0.48285252",
"0.4826261",
"0.48208448",
"0.48165265",
"0.48158422",
"0.4785824",
"0.47804296",
"0.47790778",
"0.47781006",
"0.47764018",
"0.47750208",
"0.47745156",
"0.47721",
"0.47591195",
"0.47571427"
] |
0.78798735
|
0
|
Get the market information from a Betfair market ID.
|
def get_market_info(
self, market_id: str
) -> Tuple[str, datetime, Dict[int, str]]:
market_filter_ = market_filter(market_ids=[market_id])
market = (
self._client
.betting
.list_market_catalogue(
filter=market_filter_,
market_projection=['MARKET_START_TIME', 'RUNNER_DESCRIPTION']
)[0]
)
market_name = market.market_name
market_start_time = market.market_start_time
selections = {}
for runner in market.runners:
selections[runner.selection_id] = runner.runner_name
return market_name, market_start_time, selections
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_market_info_lite(self, market_id):\n req = BFExchangeFactory.create(\"ns1:GetMarketInfoReq\")\n req.marketId = market_id\n rsp = self._soapcall(BFExchangeService.getMarketInfo, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code != GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarketInfo} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n info = MarketInfoLite(**{k: v for k, v in rsp.marketLite})\n return info",
"def getMarket(self):\n return self.market",
"def query_bid_price(market_data):\n print(\"Consultando BID\")\n if market_data[\"marketData\"][\"BI\"]:\n bid_price = market_data[\"marketData\"][\"BI\"][0][\"price\"]\n print(f\"Precio de BID: ${bid_price:,.2f}\".replace('.', ','))\n return bid_price\n print(\"No hay BIDs activos\")\n return None",
"def market(self):\n return self._market",
"def market_info(self, symbol):\n r = requests.get(self.base_url + f'/game/locations/{symbol}/marketplace', headers = self.auth_header)\n return r.text",
"def quote_endpoint(self, market_id):\n self._wait_before_call()\n market = self._format_market_id(market_id)\n try:\n data, meta_data = self.TS.get_quote_endpoint(\n symbol=market, outputsize=\"full\"\n )\n return data\n except:\n logging.error(\"AlphaVantage wrong api call for {}\".format(market))\n return None",
"def get_market_info(self, market_id, lite=True, coupon_links=False,\n locale=None):\n req = BFExchangeFactory.create(\"ns1:GetMarketReq\")\n req.marketId = market_id\n req.includeCouponLinks = coupon_links\n if locale:\n req.locale = locale\n rsp = self._soapcall(BFExchangeService.getMarket, req)\n if rsp.errorCode != GetMarketErrorEnum.OK:\n error_code = rsp.errorCode\n if error_code == GetMarketErrorEnum.API_ERROR:\n error_code = rsp.header.errorCode\n logger.error(\"{getMarket} failed with error {%s}\", error_code)\n raise ServiceError(error_code)\n market = rsp.market\n coupons = market.couponLinks[0] if market.couponLinks else []\n coupons = [CouponLink(**{k: v for k, v in coupon})\n for coupon in coupons if coupon]\n runners = market.runners[0] if market.runners else []\n runners = [Runner(**{k: v for k, v in runner})\n for runner in runners if runner]\n hierarchies = market.eventHierarchy[0] if market.eventHierarchy else []\n hierarchies = [evt for evt in hierarchies]\n rsp = MarketInfo(**{k: v for k, v in market})\n info.eventHierarchy = hierarchies\n rsp.couponLinks = coupons\n rsp.runners = runners\n return rsp",
"def get_markets(self, market):\n url = \"{url}/{market}\".format(url=self.MARKET_SERVICE_URL,\n market=market)\n\n return self.make_request(url)",
"async def fetch_markets(self, params={}):\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # \"success\": 1,\n # \"data\": {\n # \"pairs\": [\n # {\n # \"name\": \"btc_jpy\",\n # \"base_asset\": \"btc\",\n # \"quote_asset\": \"jpy\",\n # \"maker_fee_rate_base\": \"0\",\n # \"taker_fee_rate_base\": \"0\",\n # \"maker_fee_rate_quote\": \"-0.0002\",\n # \"taker_fee_rate_quote\": \"0.0012\",\n # \"unit_amount\": \"0.0001\",\n # \"limit_max_amount\": \"1000\",\n # \"market_max_amount\": \"10\",\n # \"market_allowance_rate\": \"0.2\",\n # \"price_digits\": 0,\n # \"amount_digits\": 4,\n # \"is_enabled\": True,\n # \"stop_order\": False,\n # \"stop_order_and_cancel\": False\n # }\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data')\n pairs = self.safe_value(data, 'pairs', [])\n result = []\n for i in range(0, len(pairs)):\n entry = pairs[i]\n id = self.safe_string(entry, 'name')\n baseId = self.safe_string(entry, 'base_asset')\n quoteId = self.safe_string(entry, 'quote_asset')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n result.append({\n 'id': id,\n 'symbol': base + '/' + quote,\n 'base': base,\n 'quote': quote,\n 'settle': None,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': None,\n 'type': 'spot',\n 'spot': True,\n 'margin': False,\n 'swap': False,\n 'future': False,\n 'option': False,\n 'active': self.safe_value(entry, 'is_enabled'),\n 'contract': False,\n 'linear': None,\n 'inverse': None,\n 'taker': self.safe_number(entry, 'taker_fee_rate_quote'),\n 'maker': self.safe_number(entry, 'maker_fee_rate_quote'),\n 'contractSize': None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': self.parse_number(self.parse_precision(self.safe_string(entry, 'amount_digits'))),\n 'price': self.parse_number(self.parse_precision(self.safe_string(entry, 'price_digits'))),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.safe_number(entry, 'unit_amount'),\n 'max': self.safe_number(entry, 'limit_max_amount'),\n },\n 'price': {\n 'min': None,\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': entry,\n })\n return result",
"def get_market_orderbook(self, market):\n return self.__call__('market', 'getmarketorderbook',\n {'marketname': market})",
"def call(self, ids):\n\n self.req.MarketIds = ids\n betlog.betlog.info('calling BDAQ Api GetMarketInformation')\n result = self.client.service.GetMarketInformation(self.req)\n # note the raw data is returned here\n return result",
"def get_market_data(self) -> dict:\n return MarketData(asks=self.get_orders_by_action(OrderAction.SELL, self.deep.ask_count),\n bids=self.get_orders_by_action(OrderAction.BUY, self.deep.bid_count)).format",
"def get_markets(self):\n\n #\n\n result = self.api_query('info')\n detail = []\n for key, value in result['pairs'].items():\n IsActive = False\n if value['hidden'] ==0:\n IsActive = True\n dict_result = {'MarketCurrency':key.split('_')[0],'BaseCurrency': key.split('_')[1], 'MarketName':key,'IsActive':IsActive}\n detail.append(dict_result)\n\n result={'success' : True, 'message':'', 'result':detail}\n return result",
"def public_ticker(self, market_symbol):\n return self.get(f'markets/{market_symbol}/ticker')",
"def get_event_info(self, market_id: str) -> Tuple[str, str, str]:\n market_filter_ = market_filter(market_ids=[market_id])\n\n event_type = (\n self._client\n .betting\n .list_event_types(filter=market_filter_)[0]\n .event_type\n .name\n )\n\n event = (\n self._client\n .betting\n .list_events(filter=market_filter_)[0]\n .event\n .name\n )\n\n competition = (\n self._client\n .betting\n .list_competitions(filter=market_filter_)[0]\n .competition\n .name\n )\n\n return event_type, event, competition",
"def query_market_data(self, kind_of_price):\n market_data = pyRofex.get_market_data(\n ticker=self.symbol,\n entries=[kind_of_price]\n )\n return market_data",
"async def fetch_markets(self, params={}):\n spotMarketsInfo = await self.publicGetConfPubInfoPair(params)\n futuresMarketsInfo = await self.publicGetConfPubInfoPairFutures(params)\n spotMarketsInfo = self.safe_value(spotMarketsInfo, 0, [])\n futuresMarketsInfo = self.safe_value(futuresMarketsInfo, 0, [])\n markets = self.array_concat(spotMarketsInfo, futuresMarketsInfo)\n marginIds = await self.publicGetConfPubListPairMargin(params)\n marginIds = self.safe_value(marginIds, 0, [])\n #\n # [\n # \"1INCH:USD\",\n # [\n # null,\n # null,\n # null,\n # \"2.0\",\n # \"100000.0\",\n # null,\n # null,\n # null,\n # null,\n # null,\n # null,\n # null\n # ]\n # ]\n #\n result = []\n for i in range(0, len(markets)):\n pair = markets[i]\n id = self.safe_string_upper(pair, 0)\n market = self.safe_value(pair, 1, {})\n spot = True\n if id.find('F0') >= 0:\n spot = False\n swap = not spot\n baseId = None\n quoteId = None\n if id.find(':') >= 0:\n parts = id.split(':')\n baseId = parts[0]\n quoteId = parts[1]\n else:\n baseId = id[0:3]\n quoteId = id[3:6]\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n splitBase = base.split('F0')\n splitQuote = quote.split('F0')\n base = self.safe_string(splitBase, 0)\n quote = self.safe_string(splitQuote, 0)\n symbol = base + '/' + quote\n baseId = self.get_currency_id(baseId)\n quoteId = self.get_currency_id(quoteId)\n settle = None\n settleId = None\n if swap:\n settle = quote\n settleId = quote\n symbol = symbol + ':' + settle\n minOrderSizeString = self.safe_string(market, 3)\n maxOrderSizeString = self.safe_string(market, 4)\n margin = False\n if spot and self.in_array(id, marginIds):\n margin = True\n result.append({\n 'id': 't' + id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 'type': 'spot' if spot else 'swap',\n 'spot': spot,\n 'margin': margin,\n 'swap': swap,\n 'future': False,\n 'option': False,\n 'active': True,\n 'contract': swap,\n 'linear': True if swap else None,\n 'inverse': False if swap else None,\n 'contractSize': self.parse_number('1') if swap else None,\n 'expiry': None,\n 'expiryDatetime': None,\n 'strike': None,\n 'optionType': None,\n 'precision': {\n 'amount': int('8'), # https://github.com/ccxt/ccxt/issues/7310\n 'price': int('5'),\n },\n 'limits': {\n 'leverage': {\n 'min': None,\n 'max': None,\n },\n 'amount': {\n 'min': self.parse_number(minOrderSizeString),\n 'max': self.parse_number(maxOrderSizeString),\n },\n 'price': {\n 'min': self.parse_number('1e-8'),\n 'max': None,\n },\n 'cost': {\n 'min': None,\n 'max': None,\n },\n },\n 'info': market,\n })\n return result",
"async def fetch_markets(self, params={}):\n response = await self.publicGetInstrumentActiveAndIndices(params)\n #\n # [\n # {\n # \"symbol\": \"LTCUSDT\",\n # \"rootSymbol\": \"LTC\",\n # \"state\": \"Open\",\n # \"typ\": \"FFWCSX\",\n # \"listing\": \"2021-11-10T04:00:00.000Z\",\n # \"front\": \"2021-11-10T04:00:00.000Z\",\n # \"expiry\": null,\n # \"settle\": null,\n # \"listedSettle\": null,\n # \"relistInterval\": null,\n # \"inverseLeg\": \"\",\n # \"sellLeg\": \"\",\n # \"buyLeg\": \"\",\n # \"optionStrikePcnt\": null,\n # \"optionStrikeRound\": null,\n # \"optionStrikePrice\": null,\n # \"optionMultiplier\": null,\n # \"positionCurrency\": \"LTC\", # can be empty for spot markets\n # \"underlying\": \"LTC\",\n # \"quoteCurrency\": \"USDT\",\n # \"underlyingSymbol\": \"LTCT=\", # can be empty for spot markets\n # \"reference\": \"BMEX\",\n # \"referenceSymbol\": \".BLTCT\", # can be empty for spot markets\n # \"calcInterval\": null,\n # \"publishInterval\": null,\n # \"publishTime\": null,\n # \"maxOrderQty\": 1000000000,\n # \"maxPrice\": 1000000,\n # \"lotSize\": 1000,\n # \"tickSize\": 0.01,\n # \"multiplier\": 100,\n # \"settlCurrency\": \"USDt\", # can be empty for spot markets\n # \"underlyingToPositionMultiplier\": 10000,\n # \"underlyingToSettleMultiplier\": null,\n # \"quoteToSettleMultiplier\": 1000000,\n # \"isQuanto\": False,\n # \"isInverse\": False,\n # \"initMargin\": 0.03,\n # \"maintMargin\": 0.015,\n # \"riskLimit\": 1000000000000, # can be null for spot markets\n # \"riskStep\": 1000000000000, # can be null for spot markets\n # \"limit\": null,\n # \"capped\": False,\n # \"taxed\": True,\n # \"deleverage\": True,\n # \"makerFee\": -0.0001,\n # \"takerFee\": 0.0005,\n # \"settlementFee\": 0,\n # \"insuranceFee\": 0,\n # \"fundingBaseSymbol\": \".LTCBON8H\", # can be empty for spot markets\n # \"fundingQuoteSymbol\": \".USDTBON8H\", # can be empty for spot markets\n # \"fundingPremiumSymbol\": \".LTCUSDTPI8H\", # can be empty for spot markets\n # \"fundingTimestamp\": \"2022-01-14T20:00:00.000Z\",\n # \"fundingInterval\": \"2000-01-01T08:00:00.000Z\",\n # \"fundingRate\": 0.0001,\n # \"indicativeFundingRate\": 0.0001,\n # \"rebalanceTimestamp\": null,\n # \"rebalanceInterval\": null,\n # \"openingTimestamp\": \"2022-01-14T17:00:00.000Z\",\n # \"closingTimestamp\": \"2022-01-14T18:00:00.000Z\",\n # \"sessionInterval\": \"2000-01-01T01:00:00.000Z\",\n # \"prevClosePrice\": 138.511,\n # \"limitDownPrice\": null,\n # \"limitUpPrice\": null,\n # \"bankruptLimitDownPrice\": null,\n # \"bankruptLimitUpPrice\": null,\n # \"prevTotalVolume\": 12699024000,\n # \"totalVolume\": 12702160000,\n # \"volume\": 3136000,\n # \"volume24h\": 114251000,\n # \"prevTotalTurnover\": 232418052349000,\n # \"totalTurnover\": 232463353260000,\n # \"turnover\": 45300911000,\n # \"turnover24h\": 1604331340000,\n # \"homeNotional24h\": 11425.1,\n # \"foreignNotional24h\": 1604331.3400000003,\n # \"prevPrice24h\": 135.48,\n # \"vwap\": 140.42165,\n # \"highPrice\": 146.42,\n # \"lowPrice\": 135.08,\n # \"lastPrice\": 144.36,\n # \"lastPriceProtected\": 144.36,\n # \"lastTickDirection\": \"MinusTick\",\n # \"lastChangePcnt\": 0.0655,\n # \"bidPrice\": 143.75,\n # \"midPrice\": 143.855,\n # \"askPrice\": 143.96,\n # \"impactBidPrice\": 143.75,\n # \"impactMidPrice\": 143.855,\n # \"impactAskPrice\": 143.96,\n # \"hasLiquidity\": True,\n # \"openInterest\": 38103000,\n # \"openValue\": 547963053300,\n # \"fairMethod\": \"FundingRate\",\n # \"fairBasisRate\": 0.1095,\n # \"fairBasis\": 0.004,\n # 
\"fairPrice\": 143.811,\n # \"markMethod\": \"FairPrice\",\n # \"markPrice\": 143.811,\n # \"indicativeTaxRate\": null,\n # \"indicativeSettlePrice\": 143.807,\n # \"optionUnderlyingPrice\": null,\n # \"settledPriceAdjustmentRate\": null,\n # \"settledPrice\": null,\n # \"timestamp\": \"2022-01-14T17:49:55.000Z\"\n # }\n # ]\n #\n result = []\n for i in range(0, len(response)):\n market = response[i]\n id = self.safe_string(market, 'symbol')\n baseId = self.safe_string(market, 'underlying')\n quoteId = self.safe_string(market, 'quoteCurrency')\n settleId = self.safe_string(market, 'settlCurrency')\n base = self.safe_currency_code(baseId)\n quote = self.safe_currency_code(quoteId)\n settle = self.safe_currency_code(settleId)\n # 'positionCurrency' may be empty(\"\", currently returns for ETHUSD)\n # so let's take the settlCurrency first and then adjust if needed\n typ = self.safe_string(market, 'typ') # type definitions at: https://www.bitmex.com/api/explorer/#not /Instrument/Instrument_get\n types = {\n 'FFWCSX': 'swap',\n 'FFWCSF': 'swap',\n 'IFXXXP': 'spot',\n 'FFCCSX': 'future',\n 'MRBXXX': 'index',\n 'MRCXXX': 'index',\n 'MRFXXX': 'index',\n 'MRRXXX': 'index',\n 'MRIXXX': 'index',\n }\n type = self.safe_string(types, typ, typ)\n swap = type == 'swap'\n future = type == 'future'\n spot = type == 'spot'\n contract = swap or future\n contractSize = None\n index = type == 'index'\n isInverse = self.safe_value(market, 'isInverse') # self is True when BASE and SETTLE are same, i.e. BTC/XXX:BTC\n isQuanto = self.safe_value(market, 'isQuanto') # self is True when BASE and SETTLE are different, i.e. AXS/XXX:BTC\n linear = (not isInverse and not isQuanto) if contract else None\n status = self.safe_string(market, 'state')\n active = status != 'Unlisted'\n expiry = None\n expiryDatetime = None\n symbol = None\n if spot:\n symbol = base + '/' + quote\n elif contract:\n symbol = base + '/' + quote + ':' + settle\n multiplierString = Precise.string_abs(self.safe_string(market, 'multiplier'))\n if linear:\n contractSize = self.parse_number(Precise.string_div('1', market['underlyingToPositionMultiplier']))\n else:\n contractSize = self.parse_number(multiplierString)\n if future:\n expiryDatetime = self.safe_string(market, 'expiry')\n expiry = self.parse8601(expiryDatetime)\n symbol = symbol + '-' + self.yymmdd(expiry)\n else:\n # for index/exotic markets, default to id\n symbol = id\n positionId = self.safe_string_2(market, 'positionCurrency', 'underlying')\n position = self.safe_currency_code(positionId)\n positionIsQuote = (position == quote)\n maxOrderQty = self.safe_number(market, 'maxOrderQty')\n initMargin = self.safe_string(market, 'initMargin', '1')\n maxLeverage = self.parse_number(Precise.string_div('1', initMargin))\n result.append({\n 'id': id,\n 'symbol': symbol,\n 'base': base,\n 'quote': quote,\n 'settle': settle,\n 'baseId': baseId,\n 'quoteId': quoteId,\n 'settleId': settleId,\n 'type': type,\n 'spot': spot,\n 'margin': False,\n 'swap': swap,\n 'future': future,\n 'option': False,\n 'index': index,\n 'active': active,\n 'contract': contract,\n 'linear': linear,\n 'inverse': isInverse,\n 'quanto': isQuanto,\n 'taker': self.safe_number(market, 'takerFee'),\n 'maker': self.safe_number(market, 'makerFee'),\n 'contractSize': contractSize,\n 'expiry': expiry,\n 'expiryDatetime': expiryDatetime,\n 'strike': self.safe_number(market, 'optionStrikePrice'),\n 'optionType': None,\n 'precision': {\n 'amount': self.safe_number(market, 'lotSize'),\n 'price': self.safe_number(market, 'tickSize'),\n 
'quote': self.safe_number(market, 'tickSize'),\n 'base': self.safe_number(market, 'tickSize'),\n },\n 'limits': {\n 'leverage': {\n 'min': self.parse_number('1') if contract else None,\n 'max': maxLeverage if contract else None,\n },\n 'amount': {\n 'min': None,\n 'max': None if positionIsQuote else maxOrderQty,\n },\n 'price': {\n 'min': None,\n 'max': self.safe_number(market, 'maxPrice'),\n },\n 'cost': {\n 'min': None,\n 'max': maxOrderQty if positionIsQuote else None,\n },\n },\n 'info': market,\n })\n return result",
"def get_market_summary(self, market):\n return self.__call__('market', 'getmarketsummary', \n {'marketname': market})",
"def fetch_price():\n\n url = \"https://www.bitstamp.net/api/ticker/\"\n\n response = json.load(urllib2.urlopen(url))\n\n return {\"buy\": response['ask'], \"sell\": response['bid']}",
"def fetch_ticker(self, symbol: str, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = self.publicGetExchangesPairTicker(self.extend(request, params))\n return self.parse_ticker(response, market)",
"async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n ticker = await self.publicGetTickerSymbol(self.extend(request, params))\n return self.parse_ticker(ticker, market)",
"def get_active_market_street(market):\r\n return market[-1]",
"async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.marketsGetSpotPairs(params)\n #\n # {\n # success: '1',\n # data: {\n # pairs: [\n # {\n # name: 'btc_jpy',\n # base_asset: 'btc',\n # quote_asset: 'jpy',\n # maker_fee_rate_base: '0',\n # taker_fee_rate_base: '0',\n # maker_fee_rate_quote: '-0.0002',\n # taker_fee_rate_quote: '0.0012',\n # unit_amount: '0.0001',\n # limit_max_amount: '1000',\n # market_max_amount: '10',\n # market_allowance_rate: '0.2',\n # price_digits: '0',\n # amount_digits: '4',\n # is_enabled: True,\n # stop_order: False,\n # stop_order_and_cancel: False\n # },\n # ...\n # ]\n # }\n # }\n #\n data = self.safe_value(response, 'data', {})\n pairs = self.safe_value(data, 'pairs', [])\n result = {}\n for i in range(0, len(pairs)):\n pair = pairs[i]\n marketId = self.safe_string(pair, 'name')\n market = self.safe_market(marketId)\n symbol = market['symbol']\n result[symbol] = {\n 'info': pair,\n 'symbol': symbol,\n 'maker': self.safe_number(pair, 'maker_fee_rate_quote'),\n 'taker': self.safe_number(pair, 'taker_fee_rate_quote'),\n 'percentage': True,\n 'tierBased': False,\n }\n return result",
"async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'symbol': market['id'],\n }\n response = await self.publicGetInstrument(self.extend(request, params))\n ticker = self.safe_value(response, 0)\n if ticker is None:\n raise BadSymbol(self.id + ' fetchTicker() symbol ' + symbol + ' not found')\n return self.parse_ticker(ticker, market)",
"async def fetch_ticker(self, symbol: str, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n response = await self.publicGetPairTicker(self.extend(request, params))\n data = self.safe_value(response, 'data', {})\n return self.parse_ticker(data, market)",
"def _fetch_stock_page(*markets) -> bs4.BeautifulSoup:\n\n if len(markets) == 0:\n raise ValueError('No markets given')\n\n params = {\n 'Exchange' : 'NMF',\n 'SubSystem': 'Prices',\n 'Action' : 'GetMarket',\n 'app' : '/osakkeet',\n 'Market' : ','.join([x.value for x in markets]),\n # 'ext_xslt': '/nordicV3/inst_table_shares.xsl'\n }\n\n r = requests.get(_API_URL, params)\n response_text = r.text\n soup = bs4.BeautifulSoup(response_text, 'lxml')\n\n return soup",
"def get_option_market_data(self, symbol: str): \n return self.trader.fetch_option_market_data(symbol)",
"def getMyMarketOrders(self, empireID):\n d = {}\n marketOrdersDict = self.getMyDictInfo('marketOrders')\n for marketID, myMarketOrderDict in marketOrdersDict.iteritems():\n if self.systems[myMarketOrderDict['system']].myEmpire.id == empireID:\n d[marketID] = myMarketOrderDict\n return d",
"def get_marketplace(self, marketplace_id):\n return MarketplaceResource(self._config).get(marketplace_id)",
"def lookup(symbol):\n\n # Contact API\n try:\n api_key = app.config[\"API_KEY\"]\n response = requests.get(\n f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(symbol)}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Parse response\n try:\n\n quote = response.json()\n\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"isotime\": datetime.datetime.utcnow().isoformat()\n }\n\n except (KeyError, TypeError, ValueError):\n return None",
"def getCurrencySymbol(id=None):",
"def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n symbol = urllib.parse.quote_plus(symbol)\n url = f\"https://cloud-sse.iexapis.com/stable/stock/{symbol}/quote?token={api_key}\"\n response = requests.get(url)\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"]\n }\n except (KeyError, TypeError, ValueError):\n return None",
"def returnTicker(self):\n ticker = {}\n t = self.dpay.rpc.get_ticker(api=\"market_history\")\n ticker = {'highest_bid': float(t['highest_bid']),\n 'latest': float(t[\"latest\"]),\n 'lowest_ask': float(t[\"lowest_ask\"]),\n 'percent_change': float(t[\"percent_change\"]),\n 'bbd_volume': t[\"bbd_volume\"],\n 'dpay_volume': t[\"dpay_volume\"]}\n return ticker",
"def Ticker(id=\"\",**kwargs):\n\n path = \"{base}{endpoint}{ID}\".format(base=API_URL,endpoint=API_ENDPOINTS['ticker'],ID=id)\n resp = requests.get(path,params=kwargs)\n return resp.json()",
"def lookup_symbol(symbol):\n\n try:\n res = requests.get(\"https://cloud.iexapis.com/stable/stock/\" +\n f\"{urllib.parse.quote_plus(symbol)}/quote?token={Config.API_KEY}\")\n res.raise_for_status()\n except requests.RequestException:\n return None\n\n try:\n quote = res.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n }\n except (KeyError, TypeError, ValueError):\n return None",
"def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n response = requests.get(f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(str(symbol))}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n flash(\"Please set API_KEY\", 'danger')\n return None\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"change\": quote[\"change\"],\n \"changePercent\": quote[\"changePercent\"],\n \"volume\": quote[\"volume\"],\n \"week52High\": quote[\"week52High\"],\n \"week52Low\": quote[\"week52Low\"],\n \"open\" :quote[\"open\"],\n \"high\" :quote['high'],\n \"low\" : quote[\"low\"]\n }\n except (KeyError, TypeError, ValueError):\n return None",
"def get_market_summaries(self):\n return self.__call__('markets', 'getmarketsummaries')",
"def public_market_summary(self, market_symbol):\n return self.get(f'markets/{market_symbol}/summary')",
"def fetch_trading_fees(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalance(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # ...\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # ...\n # }\n # }\n #\n fees = self.safe_value(response, 'Fees', {})\n keys = list(fees.keys())\n result = {}\n for i in range(0, len(keys)):\n marketId = keys[i]\n symbol = self.safe_symbol(marketId)\n fee = self.safe_value(fees, marketId)\n makerString = self.safe_string(fee, 'FeeMaker')\n takerString = self.safe_string(fee, 'FeeTaker')\n maker = self.parse_number(Precise.string_div(makerString, '100'))\n taker = self.parse_number(Precise.string_div(takerString, '100'))\n result[symbol] = {\n 'info': fee,\n 'symbol': symbol,\n 'taker': taker,\n 'maker': maker,\n 'percentage': True,\n 'tierBased': True,\n }\n return result",
"def get_market_cap(symbol):\n try:\n cap = pd_data.get_quote_yahoo(symbol)[\"marketCap\"]\n cap = cap[symbol]\n\n except:\n print(f\"Market cap for {symbol} not found.\")\n cap = \"n/a\"\n\n return cap",
"def gettickerdata(tickername):\n\n r = requests.get(constants.bloomurl + getticker(tickername) + ':US')\n soup = BeautifulSoup(r.text, 'html.parser')\n results = soup.find_all('div', class_=\"price\")\n return (\"$\" + results[0].text)",
"async def get_markets(self):\n uri = \"/v3/markets\"\n success, error = await self.request(\"GET\", uri)\n return success, error",
"def get_market_summary(**kwargs):\n url = '{}/v6/finance/quote/marketSummary'.format(BASE_URL)\n return _make_request(url, 'marketSummaryResponse', **kwargs)",
"def get_quote(access_token,ticker):\r\n quote_url = 'https://api.tdameritrade.com/v1/marketdata/{}/quotes'.format(ticker)\r\n\r\n #The header for getting a quote needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Make the get request to TD Ameritrade\r\n quote_data_json = requests.get(url=quote_url,headers=headers)\r\n return quote_data_json.json()",
"def ticker(self, param=None):\r\n\t\tdata = self._get('ticker/', query=param)\r\n\t\treturn data",
"def get_markets(self):\n self.logger.debug(\"Fetching markets.\")\n return self._api_query(\"markets\")['markets']",
"def market_model_id(self) -> str:\n return self.__market_model_id",
"def get_market_inf():\n r = requests.get(vs.DSE_URL+vs.DSE_MARKET_INF_URL)\n soup = BeautifulSoup(r.text, 'html.parser')\n quotes = [] # a list to store quotes\n\n table = soup.find('table', attrs={'class': 'table table-bordered background-white text-center', '_id': 'data-table'})\n\n for row in table.find_all('tr')[1:]:\n cols = row.find_all('td')\n quotes.append({'Date': cols[0].text.strip().replace(\",\", \"\"),\n 'Total Trade': cols[1].text.strip().replace(\",\", \"\"),\n 'Total Volume': cols[2].text.strip().replace(\",\", \"\"),\n 'Total Value (mn)': cols[3].text.strip().replace(\",\", \"\"),\n 'Total Market Cap. (mn)': cols[4].text.strip().replace(\",\", \"\"),\n 'DSEX Index': cols[5].text.strip().replace(\",\", \"\"),\n 'DSES Index': cols[6].text.strip().replace(\",\", \"\"),\n 'DS30 Index': cols[7].text.strip().replace(\",\", \"\"),\n 'DGEN Index': cols[8].text.strip().replace(\",\", \"\")\n })\n df = pd.DataFrame(quotes)\n return df",
"def get_markets(self):\n return self.__call__('markets', 'getmarkets')",
"def getPairs(self):\n jsonResponse = self.getJson(\"https://bittrex.com/api/v1.1/public/getmarkets\")\n allPairs = []\n for i in jsonResponse[\"result\"]:\n allPairs.append(i[\"MarketName\"])\n return allPairs",
"def on_market_info(self):\n pass",
"def search_for_contracts(self, market=None, buy_sell=None, type_=None, contracts=\"All\"):\n if not contracts:\n contracts = []\n if not type_:\n pass\n elif type_.lower() in ['yes', 'long'] and buy_sell == 'buy':\n type_ = {'long': 'BestBuyYesCost'}\n elif type_.lower() in ['no', 'short'] and buy_sell == 'buy':\n type_ = {'short': 'BestBuyNoCost'}\n elif type_.lower() in ['yes', 'long'] and buy_sell == 'sell':\n type_ = {'long': 'BestSellYesCost'}\n elif type_.lower() in ['no', 'short'] and buy_sell == 'sell':\n type_ = {'short': 'BestSellNoCost'}\n \n if not market:\n market_links = [(\"us_election\", 'https://www.predictit.org/api/marketdata/category/6'), (\"us_politics\", 'https://www.predictit.org/api/marketdata/category/13'), (\"world_politics\", 'https://www.predictit.org/api/marketdata/category/4')]\n elif 'us' and 'election' in market.replace('.', '').lower():\n market_links = [(\"us_elections\", 'https://www.predictit.org/api/marketdata/category/6')]\n elif 'us' and 'politic' in market.replace('.', '').lower():\n market_links = [(\"us_politics\",'https://www.predictit.org/api/marketdata/category/13')]\n elif 'world' in market.lower():\n market_links = [(\"world_politics\", 'https://www.predictit.org/api/marketdata/category/4')]\n \n \n\n market_data=[]\n for category, market_link in market_links:\n markets = list(self.browser.get(market_link).json()['Markets'])\n for market in markets:\n market = market\n market[\"Category\"] = category\n market[\"References\"]=[]\n wikidict={\"Trump\": \"http://dbpedia.org/resource/Donald_Trump\", \"Clinton\": \"http://dbpedia.org/resource/Hillary_Clinton\", \"Ossoff\": \"https://en.wikipedia.org/wiki/Jon_Ossoff\", \"Virginia\": \"https://en.wikipedia.org/wiki/Virginia\", \"Georgia\": \"https://en.wikipedia.org/wiki/Georgia_(U.S._state)\",\"Election\":\"https://en.wikipedia.org/wiki/Elections_in_the_United_States\"}\n \n for thing in [\"Trump\", \"Clinton\", \"Ossoff\", \"Virginia\", \"Georgia\",\"Election\"]:\n if thing.lower() in [element.lower() for element in market[\"Name\"].split()]:\n market[\"References\"].append(wikidict[thing])\n market_data.append(json.dumps(market))\n \n return market_data",
"def show_market(self, player):\n print('Market')\n print('------')\n cards_available = self.game.market.cards_available()\n for card in sorted(cards_available.keys()):\n count = cards_available[card]\n if card.cost > self.game.current_player.money:\n sys.stdout.write(colorama.Fore.WHITE)\n sys.stdout.write(colorama.Style.DIM)\n elif card.family == Card.FAMILY_MAJOR and player.has_card(card):\n sys.stdout.write(colorama.Fore.WHITE)\n sys.stdout.write(colorama.Style.DIM)\n else:\n sys.stdout.write(self.card_colorama(card))\n print(' * {}x {} {} ({}) - cost: {}'.format(count, card.activations, card, card.short_desc, card.cost))\n sys.stdout.write(colorama.Style.RESET_ALL)",
"def get_bear_by_id(self, base_url, bear_id):\n response = requests.get(url=f'{base_url}/bear/{bear_id}')\n return response",
"def get_ticker(self, pair):\r\n method = self.public_endpoints['ticker']['method']\r\n url = self.base_url + self.public_endpoints['ticker']['url'].format(pairId=pair)\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def lookup(symbol):\r\n \r\n # format url\r\n url = 'https://query1.finance.yahoo.com/v7/finance/options/{}'.format(symbol)\r\n \r\n # send HTTP request and JSONify\r\n page = requests.get(url, headers={'User-agent': 'Mozilla/5.0'}).json()\r\n if page['optionChain']['result'] == []:\r\n return 1\r\n \r\n # init dictionary\r\n info = {}\r\n \r\n # name\r\n try:\r\n info['name'] = page['optionChain']['result'][0]['quote']['shortName']\r\n except Exception as e:\r\n try:\r\n info['name'] = page['optionChain']['result'][0]['quote']['longName']\r\n except Exception as e:\r\n info['name'] = symbol\r\n \r\n # price\r\n try:\r\n info['price'] = page['optionChain']['result'][0]['quote']['regularMarketPrice']\r\n except Exception as e:\r\n return 2\r\n \r\n # is tradeable\r\n try:\r\n if page['optionChain']['result'][0]['quote']['quoteType'] != 'INDEX':\r\n info['tradeable'] = True\r\n except Exception as e:\r\n pass\r\n\r\n # market status\r\n try:\r\n page = page['optionChain']['result'][0]['quote']['marketState']\r\n if page != 'PRE' or page != 'NORMAL' or page != 'POST':\r\n info['marketState'] = 1\r\n except Exception as e:\r\n return 3\r\n \r\n info['symbol'] = symbol\r\n \r\n # success\r\n return info",
"def get_btc_ticker(self):\n return self.execute_http_call(\"/api/ticker\", \"GET\", headers=None)",
"def get_best_bid_ask(self, ticker):\n if symbol in self.symbol:\n bid = self.symbol[symbol][\"bid\"]\n ask = self.symbol[symbol][\"ask\"]\n return bid, ask\n else:\n print(\n \"Bid/ask values for ticker %s are not \"\n \"available from the PriceHandler.\" % symbol\n )\n return None, None",
"def get_sell_book(self, symbol=None, limit=100, offset=0): \r\n if symbol is None:\r\n sell_book = self.api.find(\"market\", \"sellBook\", query={\"account\": self.account}, limit=limit, offset=offset)\r\n else:\r\n sell_book = self.api.find(\"market\", \"sellBook\", query={\"symbol\": symbol, \"account\": self.account}, limit=limit, offset=offset)\r\n return sell_book",
"def public_market_history(self, market_symbol):\n return self.get(f'markets/{market_symbol}/trades')",
"def determine_basketball_outcome_from_api(market, params, enp_id):\n\n n_bet = 1\n outcome = None\n if market == BasketballMarkets.FULL_TIME_POINT_SPREAD:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n\n if selection == BasketballSelections.HOME_TEAM:\n hc_score = score_home + handicap\n if hc_score == score_away:\n outcome = 0\n elif hc_score > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n hc_score = score_away + handicap\n if hc_score == score_home:\n outcome = 0\n elif hc_score > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTPS bet should be ONE or TWO')\n\n elif market == BasketballMarkets.FULL_TIME_MONEYLINE:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n\n if selection == BasketballSelections.HOME_TEAM:\n if score_home == score_away:\n outcome = 0\n elif score_home > score_away:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.AWAY_TEAM:\n if score_away == score_home:\n outcome = 0\n elif score_away > score_home:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('selection should be ONE or TWO')\n elif market == BasketballMarkets.FULL_TIME_TOTAL_POINTS:\n enp_id_int = int(enp_id[3:])\n selection = params[0]\n handicap = params[1]\n response = data_api.get_event_outcome(Sports.BASKETBALL, enp_id_int)\n\n score_home = response.get(enp_id, {}).get('details', {}).get('teamAResult', -1)\n score_away = response.get(enp_id, {}).get('details', {}).get('teamBResult', -1)\n score_total = score_home + score_away\n\n if selection == BasketballSelections.OVER:\n if score_total == handicap:\n outcome = 0\n elif score_total > handicap:\n outcome = 1\n else:\n outcome = -1\n\n elif selection == BasketballSelections.UNDER:\n if score_total == handicap:\n outcome = 0\n elif score_total < handicap:\n outcome = 1\n else:\n outcome = -1\n\n else:\n raise ValueError('FTTP bet should be OVER or UNDER')\n else:\n raise ValueError('implement more markets')\n\n return outcome, n_bet",
"def fetch_markets_tickers(self):\r\n return self.__public_request('GET', '/api/v1/tickers')",
"def get(\n db: Session,\n req_body: schemas.Command,\n) -> schemas.Markets:\n if req_body.cmd != 'all':\n return schemas.Markets()\n markets = db.query(models.DesignMarket).all()\n\n out_markets_schema = schemas.Markets(\n names=[],\n )\n for market in markets:\n out_markets_schema.names.append(\n schemas.Market(\n name=market.name,\n display_name=market.display_name,\n )\n )\n return out_markets_schema",
"def get_company_info_for(symbol: str):\n baseurl = \"https://financialmodelingprep.com/api/v3/search\"\n params = {\"query\": symbol, \"apikey\": FMP_API_KEY, \"limit\": \"1\"}\n return make_request(baseurl=baseurl, params=params)",
"def getMarketsData(marketsField, output_type = None):\n try:\n _create_unverified_https_context = ssl._create_unverified_context\n except AttributeError:\n pass\n else:\n ssl._create_default_https_context = _create_unverified_https_context\n \n fields =['commodities', 'currency', 'index', 'bonds']\n if marketsField not in fields:\n raise ParametersError ('Accepted values for marketsField are \\'commodity\\', \\'currency\\', \\'index\\' or \\'bonds\\'.')\n linkAPI = 'https://api.tradingeconomics.com/markets/' + quote(marketsField) \n try:\n linkAPI = linkAPI + '?c=' + glob.apikey\n except AttributeError:\n raise LoginError('You need to do login before making any request')\n try: \n code = urlopen(linkAPI)\n code = code.getcode() \n webResults = json.loads(urlopen(linkAPI).read().decode('utf-8'))\n except ValueError:\n raise WebRequestError ('Something went wrong. Error code = ' + str(code)) \n if len(webResults) > 0:\n if marketsField == 'bonds':\n names = ['symbol','name', 'country', 'date', 'last', 'group','url','importance','dailychange','dailypercentualchange','weeklychange','weeklypercentualchange','monthlychange','monthlypercentualchange','yearlychange','yearlypercentualchange','ydtchange','ydtpercentualchange','yesterday','lastweek','lastmonth','lastyear','startyear']\n names2 = ['Symbol','Name', 'Country', 'Date', 'Last', 'Group','URL','Importance','DailyChange','DailyPercentualChange','WeeklyChange','WeeklyPercentualChange','MonthlyChange','MonthlyPercentualChange','YearlyChange','YearlyPercentualChange','YTDChange','YTDPercentualChange','yesterday','lastWeek','lastMonth','lastYear','startYear']\n else:\n names = ['symbol','ticker','name', 'country', 'date', 'last', 'group','url','importance','dailychange','dailypercentualchange','weeklychange','weeklypercentualchange','monthlychange','monthlypercentualchange','yearlychange','yearlypercentualchange','ydtchange','ydtpercentualchange','yesterday','lastweek','lastmonth','lastyear','startyear']\n names2 = ['Symbol','Ticker','Name', 'Country', 'Date', 'Last', 'Group','URL','Importance','DailyChange','DailyPercentualChange','WeeklyChange','WeeklyPercentualChange','MonthlyChange','MonthlyPercentualChange','YearlyChange','YearlyPercentualChange','YTDChange','YTDPercentualChange','yesterday','lastWeek','lastMonth','lastYear','startYear'] \n maindf = pd.DataFrame() \n for i in range(len(names)):\n names[i] = [d[names2[i]] for d in webResults]\n maindf = pd.concat([maindf, pd.DataFrame(names[i], columns = [names2[i]])], axis = 1)\n else:\n raise ParametersError ('No data available for the provided parameters.')\n if output_type == None or output_type =='df': \n output = maindf.dropna()\n elif output_type == 'raw': \n output = webResults\n else: \n raise ParametersError ('output_type options : df(defoult) for data frame or raw for unparsed results.') \n return output",
"def book_ticker(self, symbol=''):\n params = {\n 'symbol': symbol,\n }\n return self._quote_get('ticker/bookTicker', params=params)",
"def get_crypto_info(symbol, info=None):\n url = urls.crypto_currency_pairs()\n data = helper.request_get(url, 'results')\n data = [x for x in data if x['asset_currency']['code'] == symbol]\n if len(data) > 0:\n data = data[0]\n else:\n data = None\n return(helper.filter(data, info))",
"def public_markets(self):\n return self.get('markets')",
"async def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n await self.load_markets()\n market = self.market(symbol)\n request = {\n 'order_id': id,\n 'pair': market['id'],\n }\n response = await self.privateGetUserSpotOrder(self.extend(request, params))\n data = self.safe_value(response, 'data')\n return self.parse_order(data, market)",
"def update_TradeHistory(self, market):\n ##self.marketid is to do!!!\n mid = self.marketid(market)\n history = self.Request.fetch('markettrades',params={'marketid':mid})\n pair = self.Pairs[mid]\n self.TradeHistory[pair] = history\n return 0",
"def get_ticker_book(self, symbol: Symbol):\n api_params = {\n \"symbol\": symbol.value\n }\n\n return self.request.get(path='/ticker/book', params=api_params)",
"def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()",
"def get_trade(self, id: int) -> TradeOffer | None:\n return self._connection.get_trade(id)",
"def fetch_balance(self, params={}):\n self.load_markets()\n response = self.privateGetAccountBalanceV2(params)\n #\n # {\n # \"AVAILABLE_NIS\": 0.0,\n # \"NIS\": 0.0,\n # \"LOCKED_NIS\": 0.0,\n # \"AVAILABLE_BTC\": 0.0,\n # \"BTC\": 0.0,\n # \"LOCKED_BTC\": 0.0,\n # \"AVAILABLE_ETH\": 0.0,\n # \"ETH\": 0.0,\n # \"LOCKED_ETH\": 0.0,\n # \"AVAILABLE_BCHSV\": 0.0,\n # \"BCHSV\": 0.0,\n # \"LOCKED_BCHSV\": 0.0,\n # \"AVAILABLE_BCHABC\": 0.0,\n # \"BCHABC\": 0.0,\n # \"LOCKED_BCHABC\": 0.0,\n # \"AVAILABLE_LTC\": 0.0,\n # \"LTC\": 0.0,\n # \"LOCKED_LTC\": 0.0,\n # \"AVAILABLE_ETC\": 0.0,\n # \"ETC\": 0.0,\n # \"LOCKED_ETC\": 0.0,\n # \"AVAILABLE_BTG\": 0.0,\n # \"BTG\": 0.0,\n # \"LOCKED_BTG\": 0.0,\n # \"AVAILABLE_GRIN\": 0.0,\n # \"GRIN\": 0.0,\n # \"LOCKED_GRIN\": 0.0,\n # \"Fees\": {\n # \"BtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EthNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchabcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"EtcNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BtgNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"LtcBtc\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"BchsvNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0},\n # \"GrinNis\": {\"FeeMaker\": 1.0, \"FeeTaker\": 1.0}\n # }\n # }\n #\n return self.parse_balance(response)",
"def test_market_id(self):\n\n result = self.client.get('/market/abc')\n self.assertEqual(404, result.status_code)\n self.assertIn('Farmer\\'s market abc is not a valid format. Use digits only.', result.data)",
"def __call__(self, market=None):\n\t\tif self.db_flag:\n\t\t\tif market:\n\t\t\t\treturn self.db.find_one({'_id': market})\n\t\t\treturn list(self.db.find())",
"async def get_active_exchange_markets(cls) -> pd.DataFrame:\n async with aiohttp.ClientSession() as client:\n\n trading_pairs_response = await client.get(ASSET_PAIRS_URL)\n trading_pairs_response: aiohttp.ClientResponse = trading_pairs_response\n\n if trading_pairs_response.status != 200:\n raise IOError(f\"Error fetching Kraken trading pairs. \"\n f\"HTTP status is {trading_pairs_response.status}.\")\n\n trading_pairs_data: Dict[str, Any] = await trading_pairs_response.json()\n trading_pairs_data[\"result\"] = {\n pair: details for pair, details in trading_pairs_data[\"result\"].items() if \".\" not in pair}\n\n wsname_dict: Dict[str, str] = {pair: details[\"wsname\"]\n for pair, details in trading_pairs_data[\"result\"].items()}\n trading_pairs: Dict[str, Any] = {pair: {\"baseAsset\": wsname_dict[pair].split(\"/\")[0],\n \"quoteAsset\": wsname_dict[pair].split(\"/\")[1],\n \"wsname\": wsname_dict[pair]}\n for pair in trading_pairs_data[\"result\"]}\n\n trading_pairs_str: str = ','.join(trading_pairs.keys())\n\n market_response = await client.get(f\"{TICKER_URL}?pair={trading_pairs_str}\")\n market_response: aiohttp.ClientResponse = market_response\n\n if market_response.status != 200:\n raise IOError(f\"Error fetching Kraken markets information. \"\n f\"HTTP status is {market_response.status}.\")\n\n market_data = await market_response.json()\n\n market_data: List[Dict[str, Any]] = [{\"pair\": pair, **market_data[\"result\"][pair], **trading_pairs[pair]}\n for pair in market_data[\"result\"]\n if pair in trading_pairs]\n\n # Build the data frame.\n all_markets: pd.DataFrame = pd.DataFrame.from_records(data=market_data, index=\"pair\")\n all_markets[\"lastPrice\"] = all_markets.c.map(lambda x: x[0]).astype(\"float\")\n all_markets.loc[:, \"volume\"] = all_markets.v.map(lambda x: x[1]).astype(\"float\")\n\n price_dict: Dict[str, float] = await cls.get_prices_from_df(all_markets)\n\n usd_volume: List[float] = [\n (\n baseVolume * price_dict[baseAsset] if baseAsset in price_dict else -1\n )\n for baseAsset, baseVolume in zip(all_markets.baseAsset,\n all_markets.volume)]\n all_markets.loc[:, \"USDVolume\"] = usd_volume\n\n return all_markets.sort_values(\"USDVolume\", ascending=False)",
"async def fetch_trade(self, id: int) -> TradeOffer | None:\n return await self._connection.fetch_trade(id)",
"def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'id': id,\n }\n response = self.privateGetOrderGetById(self.extend(request, params))\n #\n # {\n # \"pair\": \"BtcNis\",\n # \"status\": \"Completed\",\n # \"created\": 1666689837,\n # \"type\": 0,\n # \"order_type\": 0,\n # \"amount\": 0.00000000,\n # \"price\": 50000.00000000,\n # \"stop\": 0,\n # \"id\": 10951473,\n # \"initialAmount\": 2.00000000\n # }\n #\n return self.parse_order(response, market)",
"def __get_place_of_listing(instrument):\n return instrument['market']",
"def market(self, market):\n self._market = market",
"def getBid(self):\r\n\t\treturn self.data['bid']",
"def get_chain_info(self, symbol: str): \n return self.trader.fetch_chain_info(symbol)",
"async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.privatePostAuthRSummary(params)\n #\n # Response Spec:\n # [\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # [\n # [\n # MAKER_FEE,\n # MAKER_FEE,\n # MAKER_FEE,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # DERIV_REBATE\n # ],\n # [\n # TAKER_FEE_TO_CRYPTO,\n # TAKER_FEE_TO_STABLE,\n # TAKER_FEE_TO_FIAT,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # DERIV_TAKER_FEE\n # ]\n # ],\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # {\n # LEO_LEV,\n # LEO_AMOUNT_AVG\n # }\n # ]\n #\n # Example response:\n #\n # [\n # null,\n # null,\n # null,\n # null,\n # [\n # [0.001, 0.001, 0.001, null, null, 0.0002],\n # [0.002, 0.002, 0.002, null, null, 0.00065]\n # ],\n # [\n # [\n # {\n # curr: 'Total(USD)',\n # vol: '0',\n # vol_safe: '0',\n # vol_maker: '0',\n # vol_BFX: '0',\n # vol_BFX_safe: '0',\n # vol_BFX_maker: '0'\n # }\n # ],\n # {},\n # 0\n # ],\n # [null, {}, 0],\n # null,\n # null,\n # {leo_lev: '0', leo_amount_avg: '0'}\n # ]\n #\n result = {}\n fiat = self.safe_value(self.options, 'fiat', {})\n feeData = self.safe_value(response, 4, [])\n makerData = self.safe_value(feeData, 0, [])\n takerData = self.safe_value(feeData, 1, [])\n makerFee = self.safe_number(makerData, 0)\n makerFeeFiat = self.safe_number(makerData, 2)\n makerFeeDeriv = self.safe_number(makerData, 5)\n takerFee = self.safe_number(takerData, 0)\n takerFeeFiat = self.safe_number(takerData, 2)\n takerFeeDeriv = self.safe_number(takerData, 5)\n for i in range(0, len(self.symbols)):\n symbol = self.symbols[i]\n market = self.market(symbol)\n fee = {\n 'info': response,\n 'symbol': symbol,\n 'percentage': True,\n 'tierBased': True,\n }\n if market['quote'] in fiat:\n fee['maker'] = makerFeeFiat\n fee['taker'] = takerFeeFiat\n elif market['contract']:\n fee['maker'] = makerFeeDeriv\n fee['taker'] = takerFeeDeriv\n else: # TODO check if stable coin\n fee['maker'] = makerFee\n fee['taker'] = takerFee\n result[symbol] = fee\n return result",
"def update_my_contracts(self):\n my_shares = self.browser.get('https://www.predictit.org/Profile/GetSharesAjax')\n for market in my_shares.soup.find_all('table', class_='table table-striped table-center'):\n market_title = market.previous_element.previous_element.find('div', class_='outcome-title').find('a').get(\n 'title')\n for contract in self.my_contracts:\n if market_title == contract.market:\n market_data = [i.text.strip().replace(\n \"\\n\", \"\").replace(\" \", \"\").replace('\\r', '') for i in market.find_all('td')]\n market_data_lists = [market_data[x:x + 10] for x in range(0, len(market_data), 10)]\n cid = None\n for list_ in market_data_lists:\n parsed_market_data = [market_title]\n for string in list_:\n try:\n cid = re.search(\n pattern='#\\w+\\-(\\d+)', string=string\n ).group(1)\n string = re.search(\n pattern='(.*)\\$\\(.*\\)\\;', string=string\n ).group(1)\n except AttributeError:\n pass\n parsed_market_data.append(string)\n parsed_market_data.insert(1, cid)\n self.timestamp = datetime.datetime.now()\n self.avg_price = parsed_market_data[5]\n self.gain_loss = parsed_market_data[8]\n self.latest = parsed_market_data[9]\n self.buy = parsed_market_data[-2]\n self.sell = parsed_market_data[-1]\n else:\n continue",
"def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value",
"def fmarket_download():\r\n name = request.args[\"address\"]\r\n markets = get_zipcode_markets(name)\r\n\r\n return Response(markets.to_json(), 200, mimetype=\"application/json\")",
"def markets(self, irc, msg, args):\n locationIDs = self._sql(\"\"\"\n SELECT \"locationID\" FROM evecentral_market\"\"\", None, single=False)\n if len(locationIDs) == 0:\n irc.reply('No prices have been indexed yet.', prefixNick=False)\n return\n output = []\n for locationID in locationIDs:\n locationID = locationID[0]\n location = self._get_location(locationID)\n if locationID < 30000000:\n # This would be a region\n output.append(ircutils.bold(location['itemName']))\n else:\n output.append(self._colorize_system(location))\n irc.reply(', '.join(output), prefixNick=False)",
"def getCurrencyName(id=None):",
"def get_coin_data(coin):\n url = URL + f\"ticker/{coin}\"\n data = get_url(url)\n return data",
"def get_market_price(self, exchange, pair, type):\n return self.ccxt.get_market_price(exchange, pair, type)",
"def get_market_data_via_celery(market_request):\n\n return tca_ticker_loader.get_market_data(market_request)",
"def ticker(self):\n response = self.query('ticker')\n return response",
"async def eth(ctx):\r\n await ctx.message.delete()\r\n r = requests.get(\r\n \"https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,GBP\"\r\n )\r\n r = r.json()\r\n usd = r[\"USD\"]\r\n eur = r[\"EUR\"]\r\n gbp = r[\"GBP\"]\r\n em = discord.Embed(\r\n description=f\"USD: `{str(usd)}$`\\nEUR: `{str(eur)}€`\\n\\nGBP: `{str(gbp)}£`\"\r\n )\r\n em.set_author(\r\n name=\"Ethereum\",\r\n icon_url=\"https://cdn.discordapp.com/attachments/271256875205525504/374282740218200064/2000px-Ethereum_logo.png\",\r\n )\r\n await ctx.send(embed=em)",
"def get_target_market():\n return choice(GraphParameters.TARGET_MARKETS)",
"def bloomberg(site):\n uri = \"https://www.bloomberg.com/markets/api/bulk-time-series/price/\"\n endpoint = (\n \"USDCNY%3ACUR,USDRUB%3ACUR,USDJPY%3ACUR,USDEUR%3ACUR,USDKRW%3ACUR\"\n + \",XAUUSD%3ACUR,XAGUSD%3ACUR\"\n )\n url = uri + endpoint\n headers = {\n \"authority\": \"www.bloomberg.com\",\n \"method\": \"GET\",\n \"path\": (\n \"/markets/api/comparison/data?securities=\"\n + \"USDCNY%3ACUR,USDRUB%3ACUR,USDJPY%3ACUR,USDEUR%3ACUR,USDKRW%3ACUR\"\n + \",XAUUSD%3ACUR,XAGUSD%3ACUR\"\n + \"&securityType=CURRENCY&locale=en\"\n ),\n \"scheme\": \"https\",\n \"accept\": (\n \"text/html,application/xhtml+xml,application/xml;q=0.9,image/\"\n + \"webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n ),\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"cookie\": (\n \"bbAbVisits=1; _pxhd=e24b47c64d37711c147cfb3c4b35c845563d2f9831b\"\n + \"03d9189f8cd761bc2be4f:d78eeb01-34c9-11ea-8f86-51d2aad9afb3; _px\"\n + \"vid=d78eeb01-34c9-11ea-8f86-51d2aad9afb3; _reg-csrf=s%3Ab0pWvbcs\"\n + \"UtrjYeJ0T2GrTaaD.8kaQlvHchJ1D%2FZZMaQWQiTizJTxrqqyzzuEZHEvlQNw;\"\n + \" agent_id=7989385a-d6d9-4446-b7aa-3c937407862b;\"\n + \" session_id=5702901e-d5fe-41e7-b259-df46322015e0;\"\n + \" session_key=3179869387f4c4ec4385e0d16222f0e59f48c47f;\"\n + \" _user-status=anonymous; _is-ip-whitelisted=false;\"\n + \" _user-ip=91.132.137.116; trc_cookie_storage=taboola%2520global%253\"\n + \"Auser-id%3D2f4acdc6-7c3c-412c-8766-d9c80dcffc38-tuct513df3e;\"\n + \" bdfpc=004.0586371899.1578785723722;\"\n + \" _reg-csrf-token=4ZxUa9q8-fkNXQkoHHXhnobWne1sDlIVcKEQ\"\n ),\n \"dnt\": \"1\",\n \"if-none-match\": 'W/\"lZU52eQYxjadyNKGCyftEg==\"',\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\"\n + \" (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27\"\n ),\n }\n try:\n session = requests.Session()\n session.headers = headers\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()\n data = {}\n for item in ret:\n symbol = item[\"id\"].replace(\":CUR\", \"\")\n symbol = symbol[:3] + \":\" + symbol[-3:]\n data[symbol] = float(item[\"lastPrice\"])\n data[\"USD:XAG\"] = 1 / data.pop(\"XAG:USD\")\n data[\"USD:XAU\"] = 1 / data.pop(\"XAU:USD\")\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")",
"def get_my_contracts(self):\n self.my_contracts = []\n my_shares = self.browser.get('https://www.predictit.org/Profile/GetSharesAjax')\n for market in my_shares.soup.find_all('table', class_='table table-striped table-center'):\n market_title = market.previous_element.previous_element.find('div', class_='outcome-title').find('a').get(\n 'title')\n market_data = [i.text.strip().replace(\n \"\\n\", \"\").replace(\" \", \"\").replace('\\r', '') for i in market.find_all('td')]\n market_data_lists = [market_data[x:x + 10] for x in range(0, len(market_data), 10)]\n cid = None\n for list_ in market_data_lists:\n parsed_market_data = [market_title]\n for string in list_:\n try:\n cid = re.search(\n pattern='#\\w+\\-(\\d+)', string=string\n ).group(1)\n string = re.search(\n pattern='(.*)\\$\\(.*\\)\\;', string=string\n ).group(1)\n except AttributeError:\n pass\n parsed_market_data.append(string)\n for line in urlopen('https://www.predictit.org/Contract/'+ str(cid) + '/#data').read().splitlines():\n if 'ChartTicker' in str(line):\n ticker = re.search(pattern=\"= '(.*)';\", string=str(line)).group(1)\n break\n parsed_market_data.insert(1, cid)\n parsed_market_data.append(ticker)\n contract = Contract(*parsed_market_data)\n self.my_contracts.append(contract)",
"def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):\n self.load_markets()\n market = self.market(symbol)\n request = {\n 'pair': market['id'],\n }\n orderbook = self.publicGetExchangesPairOrderbook(self.extend(request, params))\n return self.parse_order_book(orderbook, symbol)",
"def get_product_ticker(self, product_id):\n\t\tr = requests.get(self.url + '/products/{}/ticker'.format(product_id), timeout=30)\n # r.raise_for_status()\n\t\treturn r.json()"
] |
[
"0.7296463",
"0.6786435",
"0.64976203",
"0.645441",
"0.6436033",
"0.6377452",
"0.6353565",
"0.6345084",
"0.6292913",
"0.6270462",
"0.6263126",
"0.62144077",
"0.6168539",
"0.6146066",
"0.6135332",
"0.6084566",
"0.6076271",
"0.6071816",
"0.6056507",
"0.6041295",
"0.5983031",
"0.5908562",
"0.5906014",
"0.5894554",
"0.58888024",
"0.58542126",
"0.5834648",
"0.5819151",
"0.5746868",
"0.5738209",
"0.57363003",
"0.5725606",
"0.5655274",
"0.5649479",
"0.56413907",
"0.5611012",
"0.5597976",
"0.55926734",
"0.5585114",
"0.5564691",
"0.55631524",
"0.5530418",
"0.5523995",
"0.5518695",
"0.55183786",
"0.550843",
"0.54937863",
"0.5492183",
"0.5480066",
"0.5454454",
"0.5436025",
"0.5424318",
"0.54175764",
"0.53975743",
"0.53472286",
"0.53446066",
"0.53400177",
"0.5335599",
"0.5321906",
"0.5315473",
"0.5294631",
"0.5285212",
"0.5283971",
"0.5270775",
"0.5264676",
"0.5263226",
"0.52608037",
"0.52569443",
"0.52524143",
"0.52516",
"0.5243724",
"0.5238646",
"0.5237318",
"0.5227902",
"0.52212286",
"0.52160895",
"0.5214115",
"0.5213936",
"0.5201132",
"0.5177162",
"0.5160034",
"0.5155197",
"0.5154429",
"0.5150631",
"0.5148575",
"0.5137887",
"0.51264286",
"0.5123259",
"0.5115506",
"0.51145333",
"0.5113651",
"0.51124465",
"0.51089364",
"0.51081824",
"0.51080084",
"0.5104881",
"0.50996995",
"0.50970906",
"0.50957",
"0.5086316"
] |
0.74244684
|
0
|
Start the Betfair ladder stream.
|
def start_betfair_ladder_stream(
self, market_id: str, conflate_ms: float
) -> queue.Queue:
if self.stream is not None:
logger.info(
"There is already a Betfair market stream running. Before "
"starting a new stream, the existing one must be stopped."
)
ladder_queue = self.stream.listener.output_queue
return ladder_queue
logger.info("Initialising output queue.")
ladder_queue = queue.Queue()
logger.info("Initialising Betfair stream listener.")
listener = betfairlightweight.StreamListener(ladder_queue)
logger.info("Creating the Betfair market stream.")
stream = self._client.streaming.create_stream(listener=listener)
logger.info("Setting the market filter to market_id=%s.", market_id)
market_filter_ = streaming_market_filter(market_ids=[market_id])
logger.info("Initialising streaming market data filter.")
market_data_filter = streaming_market_data_filter(
# fields=['EX_MARKET_DEF', 'EX_ALL_OFFERS'], # Without virtual bets
fields=['EX_MARKET_DEF', 'EX_BEST_OFFERS_DISP'], # With virtual bets
ladder_levels=10
)
logger.info("Subscribing to the market.")
stream.subscribe_to_markets(
market_filter=market_filter_,
market_data_filter=market_data_filter,
conflate_ms=min(conflate_ms, 120000),
)
logger.info("Starting the Betfair market stream.")
stream.start(async_=True)
self.stream = stream
return ladder_queue
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def start(self):\n\t\tself.stream.start_stream()",
"def start_stream(self):\n pass",
"def start(self) -> None:\n self._stream.start()",
"def start(self):\n self.stream.start()\n self.running = True\n self.update()",
"def run(self):\n self.arbiter.start()",
"def start_stream(self):\n self.handle = lt.add_magnet_uri(self.lt_ses, self.queue[0].magnet_link, # pylint: disable=no-member\n self.params)\n self.handle.set_sequential_download(True)\n\n self.stream_thread = threading.Thread(target=self._stream,\n name='stream')\n self.stream_thread.start()",
"def start(self, state):\n return self.brain.start(state)",
"def start():",
"def start():",
"def start():",
"def start():",
"def run(self, stream):\n pass",
"def start_hik(self, event):\n self.camdata.start_stream()",
"def start(self) -> None:",
"def start(self) -> None:",
"def starting_stream(self, stream):\n self.cur_stream_observations = 0\n self.stream = stream",
"def start():\n\n log(\"Starting...\")\n\n retrieve_environment(\"ETFTradeConfigSettings\", \"FilterConfig\")\n initialize_globals()\n for trade_filter in filter_list_from_config:\n ael.TradeFilter[trade_filter].trades().subscribe(trade_cb)\n # ael.Trade.subscribe(trade_cb, None)\n\n log(\"output_path: \" + path)\n log(\"Started in main.\")",
"def main():\n import time\n import argparse\n\n from bcipy.acquisition.datastream.generator import file_data, random_data\n\n default_channels = ['ch' + str(i + 1) for i in range(16)]\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--filename', default=None,\n help=\"file containing data to be streamed; \"\n \"if missing, random data will be served.\")\n parser.add_argument('-c', '--channels',\n default=','.join(default_channels),\n help='comma-delimited list')\n parser.add_argument('-s', '--sample_rate', default='256',\n help='sample rate in hz')\n\n parser.add_argument('-m', '--markers', action=\"store_true\", default=False)\n parser.add_argument('-n', '--name', default='LSL')\n args = parser.parse_args()\n\n params = {'channels': args.channels.split(','),\n 'hz': int(args.sample_rate)}\n\n # Generate data from the file if provided, otherwise random data.\n generator = file_data(filename=args.filename) if args.filename \\\n else random_data(channel_count=len(params['channels']))\n\n markers = True if args.markers else False\n try:\n server = LslDataServer(params=params, generator=generator,\n add_markers=markers, name=args.name)\n\n log.debug(\"New server created\")\n server.start()\n log.debug(\"Server started\")\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n server.stop()",
"def start(self):\n self.p.start()",
"def start (self):\n pass",
"def start (self):\n pass",
"def start(self):\r\n start_thread(self._extract_thread_func, \"message sorter thread\")\r\n self.debug(\"### initialized stream sorter with %g s time window\"\r\n % (self.delay))",
"def start(self) -> None:\n ...",
"def start(self) -> None:\n ...",
"def start(self, sniffer):\n pass",
"def start_flow(consumer_key, consumer_secret_key,\n access_token, access_secret_token,\n languages, track, locations, data_base):\n\n logger.info('Initializing listener')\n # Instantiate listener\n l = StdoutListener(data_base)\n\n logger.info('Authorization')\n auth = OAuthHandler(consumer_key, consumer_secret_key)\n auth.set_access_token(access_token, access_secret_token)\n\n # Start data stream\n logger.info('Beginning streaming')\n stream = Stream(auth, l)\n stream.filter(track=track,\n languages=languages,\n locations=locations)",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def start(self):\n pass",
"def run(self):\n self.started()",
"def open(self):\n if self.__stream is None:\n self.__open() # instantiate stream object\n self.__stream.start_stream() # reactivate collecting samples",
"def start(self):\r\n pass",
"def start(self):\n self._state = 'Started'",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n # Print start message\n print(\">> Lab 4B - LIDAR Wall Following\")",
"def _start(self):\n pass",
"def start(self):\n ...",
"def stop_betfair_ladder_stream(self) -> None:\n if self.stream is not None:\n logger.info(\"Stopping the Betfair market stream.\")\n self.stream.stop()\n self.stream.listener.output_queue.put(\"Terminate\")\n self.stream = None\n else:\n logger.info(\"No existing Betfair market stream to stop.\")",
"def run(self):\n self.p = pyaudio.PyAudio()\n self.stream = self.p.open(format=pyaudio.paFloat32, channels=self.CHANNELS, rate=self.RATE, input=True,\n output=False, stream_callback=self.callback)\n self.stream.start_stream()\n self.stop.setSingleShot(True)\n self.stop.start()",
"def start(self):\n self.j_pump.start()\n return self",
"def feed(self):\n self.phase.set(2)\n # Start with first player\n #self.first_player\n pass",
"def start(self) -> None:\n self.__enter__()",
"def start(self):\n self._is_waiting = False",
"def start(self):\r\n self.debug(\"### starting gox streaming API, trading %s%s\" %\r\n (self.curr_base, self.curr_quote))\r\n self.client.start()",
"def activate(self):\n self.start()",
"def bcp_ball_start(self, **kwargs):\n self.events.post('ball_started', **kwargs)",
"def start(self):\n if self._pumping:\n return\n self._pumping = True\n self._global_reactor.callLater(0, self._pump_once)",
"def start(self):\n raise NotImplementedError",
"def start(self):\n raise NotImplementedError",
"def start(self):\n raise NotImplementedError",
"def startUpdates(self):\r\n # Analytics stream\r\n self.blptsAnalytics = blpapiwrapper.BLPTS()\r\n self.streamWatcherAnalytics = StreamWatcher(self, BloombergQuery.ANALYTICS)\r\n self.blptsAnalytics.register(self.streamWatcherAnalytics)\r\n # Price only stream\r\n self.blptsPriceOnly = blpapiwrapper.BLPTS()\r\n self.streamWatcherPriceOnly = StreamWatcher(self, BloombergQuery.PRICEONLY)\r\n self.blptsPriceOnly.register(self.streamWatcherPriceOnly)\r\n # Price change subscription\r\n self.streamWatcherBID = StreamWatcher(self,BloombergQuery.BID)\r\n self.bbgstreamBIDEM = blpapiwrapper.BLPStream(list((self.embondsisins + BBGHand + ' Corp').astype(str)), 'BID', 0)\r\n self.bbgstreamBIDEM.register(self.streamWatcherBID)\r\n self.bbgstreamBIDEM.start()\r\n # Risk free bonds: no streaming as too many updates - poll every 15 minutes\r\n rfRequest = blpapiwrapper.BLPTS(list((self.rfbondsisins + '@CBBT' + ' Corp').astype(str)), self.bbgPriceRFQuery)\r\n self.RFtimer = RFdata(900, rfRequest, self)\r\n self.BDMdata = BDMdata(900, self) #15 MINUTES\r\n self.BDMEODsave = BDMEODsave(self)",
"def start(self, add_runs=False, go=True):\r\n self.hysplit(add_runs, go)",
"def start(self):\n\n # Start listening for records\n self._run_loop(True)\n # There might still be records in the queue.\n self._run_loop(False)",
"def start_landscaper(self):\n self._initialise_landscaper()\n if self.conf_manager.get_flush():\n self.state = \"building\"\n self.graph_db.delete_all()\n self._initilise_graph_db()\n self._start_listeners()",
"def start(self):\n state_thread = threading.Thread(target=self._parse_block_queue)\n state_thread.daemon = True\n state_thread.start()",
"def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()",
"def start_traffic(self):\n raise NotImplementedError",
"def start() -> None:\n\n # PREPARE\n clone_game_files()\n\n # SIMULATE\n turns = run.simulation()\n\n # LOG\n logs = read.combine_logs(turns)\n\n # CALCULATE\n results = calculate.results(logs)\n\n # DISPLAY\n visualize.charts(results)\n\n # CLEAN\n remove_cloned_files()",
"def start(self):\n self.recording = True",
"def streamer(self):\n retry = 3\n print ('start streamer!')\n while self.container is None and 0 < retry:\n if not self.collect_frames:\n break\n #print (type(self.container))\n retry -= 1\n try:\n self.container = av.open(self.drone.get_video_stream())\n print('success')\n except av.AVError as ave:\n print(ave)\n print('retry...')",
"def start():\n trio.run(_main)",
"def start(self):\n self.listener.start()\n # No need to start broadcaster, it just sends when necessary",
"def main()->None:\n #Define key words\n keyword_list = ['#DataScience', '#MachineLearning', '#artificialintelligence', '#AI', '#ai', '#machinelearning',\n '#deeplearning', 'DeepLearning', '#ML', '#ArtificialIntelligence', '#machinelearning',\n 'DigitalTransformation'] # track list\n\n #Initiate Time\n start_time = time.time() # grabs the system time\n print(\"Launch! \\n\")\n\n #Listen to twitter\n twitterStream = Stream(Authentification(), listener.Listener(start_time, time_limit=3600)) # initialize Stream object with a time out limit\n twitterStream.filter(track=keyword_list, languages=['en']) # call the filter method to run the Stream Object\n print('Exctraction from twitter succesful')",
"def start(self):\n self.setup_initializer()\n self.setup_fader()\n self.fade_out_duration = 1.2",
"def _start(self):",
"def start(self, **kwargs):\n pass",
"def start(self, **kwargs):\n pass",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n # Print start message\n print(\">> Lab 3B - Depth Camera Cone Parking\")",
"def _start(self):\n\n _log.debug(\"Pipeline {} launching run components\".format(self.id))\n self._start_time = time.time()\n for run in self.runs:\n run.start()\n if run.sleep_after:\n time.sleep(run.sleep_after)",
"def _start_streaming_ram_to_host(self):\n self.regs.SDRAM_HOST_READ_GO = 1\n self.regs.CSTREAM_CFG = 1",
"def start(self):\n \n self.thread.start()\n self.state = \"running\"",
"def run(self):\n \n rospy.spin()",
"def start_streaming_offboard_points(self):\n def run_streaming():\n self.offboard_point_streaming = True\n while (not rospy.is_shutdown()) and self.offboard_point_streaming:\n # Publish commands\n if (self.vel_setpoint_bu_lenu__lenu is not None):\n\n # limit speed for safety\n velsp_limited = deepcopy(self.vel_setpoint_bu_lenu__lenu)\n speed = np.linalg.norm([velsp_limited.linear.x,\n velsp_limited.linear.y,\n velsp_limited.linear.z])\n if speed > Constants.MAX_SPEED:\n rospy.logwarn(\"Velocity setpoint too high! Limiting speed to {} m/s\".format(Constants.MAX_SPEED))\n velsp_limited.linear.x *= Constants.MAX_SPEED/speed\n velsp_limited.linear.y *= Constants.MAX_SPEED/speed\n velsp_limited.linear.z *= Constants.MAX_SPEED/speed\n\n\n # Publish limited setpoint\n self.vel_setpoint_pub.publish(velsp_limited)\n\n self.rate.sleep()\n\n self.offboard_point_streaming_thread = threading.Thread(target=run_streaming)\n self.offboard_point_streaming_thread.start()",
"def start(self):\r\n if self._ready:\r\n return\r\n\r\n self._start()\r\n self._ready = True",
"def start(self):\n start_time = time()\n\n def callback(res):\n env, data, game_id = res.result()\n if env is None:\n logger.info('invalid data: {}'.format(game_id))\n return\n\n self.save_data(data, game_id)\n logger.debug(f\"game {game_id}\"\n f\"halfmoves={env.num_halfmoves:3} {env.winner:12}\"\n f\"{' by resign ' if env.resigned else ' '}\"\n f\"{env.observation.split(' ')[0]}\")\n\n with ProcessPoolExecutor(max_workers=3) as executor:\n games = self.get_games_from_all_files()\n\n # poisoned reference (memleak)\n for i, game in enumerate(games):\n job = executor.submit(get_buffer, self.config, game, len(games), i)\n job.add_done_callback(callback)\n # for res in as_completed([executor.submit(get_buffer, self.config, game, len(games), i) for i, game in enumerate(games)]):",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n raise NotImplementedError()",
"def start(self):\n\t\tself.distributedVarPart()\n\t\twhile self.t < self.T:\n\t\t\tself.t += 1\n\t\t\tfor cluster in self.clusterList:\n\t\t\t\tprint(\"cluster: \")\n\t\t\t\tprint(cluster)\n\t\t\t\tprint(self.clusterList[cluster])\n\t\t\t\tself.C[cluster] = self.average_consensus(self.clusterList[cluster])\n\t\t\tprint(self.C)\n\t\t\tprint(\"turn t=%s: Centroids%s\"% (self.t, self.C), flush=True)\n\t\t\ttime.sleep(1)",
"def _start_live(self):\n StartLiveData(FromNow=False, FromTime=False, FromStartOfRun=True, UpdateEvery=1.0,\n Instrument=\"ISIS_Kafka_Event\", RunTransitionBehavior=\"Stop\", OutputWorkspace=self.outputWs,\n Address=self.listenerHost, PreserveEvents=True, AccumulationMethod=\"Add\",\n InstrumentName=self.instrumentName)\n\n # Grab most recent live data algorithm handle\n self._monitorLiveDataHandle = api.AlgorithmManagerImpl.Instance().newestInstanceOf(\"MonitorLiveData\")",
"def run(self):\n t = Tailer(self.channel, self.writer)\n t.start_tail()",
"def main():\n first_notes_and_song()\n bad_singing()\n using_a_sensor_to_block()",
"async def run(self):\n\n\t\tawait asyncio.sleep(self.delay)\n\t\tR_load = self.lock.mag/(self.sense - self.lock.mag)*self.R_ref\n\t\tawait self.resistance.push(R_load)\n\t\tawait self.current.push(self.lock.dc/(self.R_ref+R_load))\n\t\tawait self.voltage.push(self.lock.dc*R_load/(self.R_ref+R_load))\n\n\t\tlogger.debug(\"Stream has filled {} of {} points\".format(self.resistance.points_taken,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.resistance.num_points() ))\n\n\t\t#await asyncio.sleep(2*self.integration_time) # Give the filters some time to catch up?",
"def start_bath(self) -> None:\n\n self.send(self.cmd.SET_HEATING_RUNNING, True)",
"def start(self):\n assert(self._cbs is not None)\n self._as.start() # start the server",
"def start_algorithm(self):\r\n pass",
"def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)",
"def start():\n logging.info(\"Execution Started\")",
"def start_processing(self):",
"def start(self):\n self._call = LoopingCall.withCount(self._update)\n self._call.clock = reactor\n self._call.start(1.0 / self.framerate, now=False)\n self._running = True",
"def start(self):\n self._thread.start()",
"def start_blocking_stream(self, callback, done):\n pa = pyaudio.PyAudio()\n stream = pa.open(rate=self.sample_rate, channels=1, format=pyaudio.paFloat32, output=1,\n frames_per_buffer=self.buffer_size)\n t = 0.0\n # main loop\n while not done(t):\n ts = np.arange(self.buffer_size) / self.sample_rate + t # array of timesteps\n y = np.array([callback(t) for t in ts]) # compute output at each t\n y *= self.volume # scale output by volume\n stream.write(y.astype(np.float32).tobytes()) # write output to audio stream\n t += self.buffer_size / self.sample_rate # increment time\n # terminate\n stream.stop_stream()\n stream.close()\n pa.terminate()",
"def start(self) -> \"bool\":\n return _beamforming_swig.beamformer_sptr_start(self)",
"def connect(self):\n self.start()",
"def __init__(self, run, dataPairs, initial_delay=120, dry_run=False):\n\n super(FlasherThread, self).__init__(name=\"FlasherThread\")\n self.setDaemon(True)\n\n self.__run = run\n self.__data_pairs = dataPairs\n self.__initial_delay = initial_delay\n self.__dry_run = dry_run\n\n self.__sem = threading.BoundedSemaphore()\n\n self.__running = False"
] |
[
"0.68951404",
"0.66808796",
"0.65393025",
"0.63553804",
"0.61745507",
"0.600255",
"0.5989746",
"0.59398526",
"0.59398526",
"0.59398526",
"0.59398526",
"0.58484846",
"0.57963586",
"0.57312804",
"0.57312804",
"0.5725943",
"0.57067126",
"0.5693756",
"0.5692279",
"0.5670867",
"0.5670867",
"0.566982",
"0.56663054",
"0.56663054",
"0.56461084",
"0.56420714",
"0.56246686",
"0.56246686",
"0.56246686",
"0.56246686",
"0.56246686",
"0.56246686",
"0.56246686",
"0.56246686",
"0.5617533",
"0.5607312",
"0.55614865",
"0.5544758",
"0.5520856",
"0.5517209",
"0.551134",
"0.5496896",
"0.5483492",
"0.5470996",
"0.5459671",
"0.5457608",
"0.54449725",
"0.5430377",
"0.54249895",
"0.54032946",
"0.5376373",
"0.5375406",
"0.5375406",
"0.5375406",
"0.53750366",
"0.53586346",
"0.5354474",
"0.5343207",
"0.5342806",
"0.5342664",
"0.53301346",
"0.53200483",
"0.53100425",
"0.5303556",
"0.5299142",
"0.5298622",
"0.5294691",
"0.52844757",
"0.52807623",
"0.527516",
"0.527516",
"0.52724415",
"0.52682835",
"0.5267874",
"0.5262283",
"0.5261575",
"0.5254762",
"0.52534217",
"0.5246193",
"0.5245604",
"0.5245604",
"0.5245604",
"0.5245604",
"0.5236791",
"0.5234988",
"0.52329075",
"0.5222267",
"0.5220972",
"0.5218377",
"0.52162033",
"0.5207932",
"0.5202004",
"0.5193718",
"0.51931053",
"0.5189464",
"0.5186876",
"0.51863617",
"0.5184417",
"0.5173098",
"0.5171867"
] |
0.6352791
|
4
|
Stop a running Betfair ladder stream.
|
def stop_betfair_ladder_stream(self) -> None:
if self.stream is not None:
logger.info("Stopping the Betfair market stream.")
self.stream.stop()
self.stream.listener.output_queue.put("Terminate")
self.stream = None
else:
logger.info("No existing Betfair market stream to stop.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def stop_stream(self):\n pass",
"def stop(self) -> None:\n self._stream.stop()",
"def stop(self):\n\t\tself.stream.stop_stream()",
"def stop(self):\n self.stream.stop()\n self.running = False",
"def stop(self) -> None:\n self._stream.stop()",
"def stop() -> None:",
"def stopit(self):\n\n self.stop.stop()\n self.stream.close()\n self.p.terminate()\n self.p = None\n\n print(\"Recording terminated!\")",
"async def stop_livestream(self):\n await self.api.stop_livestream(self.product_type, self.serial_no)\n if self.p2p_stream_thread.is_alive() is True:\n await self.p2p_stream_handler.stop()",
"async def async_stop_stream(self) -> None:\n await self._api.request(\n \"post\",\n \"web/equipment/stop_stream\",\n json={\n \"device_sn\": self.serial,\n \"station_sn\": self.station_serial,\n \"proto\": 2,\n },\n )",
"def stop(self) -> None:\n ...",
"def stop(self):\n self._stop_flag = True",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stop(self):\n self._running = False",
"def stop(self):\n self._running = False",
"def stop(self):\n self.__running = False",
"def stop(self):\n self._run = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self.running = False",
"def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)",
"def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self):\r\n self.running = False",
"def stop(self):\r\n self.running = False",
"def stop(self) -> None:\n self._running = False",
"def stop(self) -> None:\n pass",
"def stop (self):\n pass",
"def stop (self):\n pass",
"def stop_streaming_offboard_points(self):\n self.offboard_point_streaming = False\n try:\n self.offboard_point_streaming_thread.join()\n except AttributeError:\n pass",
"def stop(self):\n self._run = False\n self.IA.stop()",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n self.stopped = True",
"def stop(self):\n self.stopped = True",
"def stop(self):\n self.running = False\n self.hop_channel(\"auto\")",
"def stop(self):\n\n self.keep_running = False",
"def stop(self):\n self._log.info(\"Stopping\")\n self._running.clear()",
"def stop(self):\n self.running = False\n self.join()",
"def stop(self):\n\n self.stop_thread = True",
"def stop(self):\r\n self.stopped = True",
"def stop(self):\r\n pass",
"def stop(self):\n self.__publish_cmd(0.0, 0.0)\n\n return",
"def _stop(self):\n\n self.streaming_pull_future.cancel() # Trigger the shutdown.\n self.streaming_pull_future.result() # Block until the shutdown is complete.",
"def stop(self):\n self.api.stop()",
"def stop(self):",
"def stop(self):",
"def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()",
"def stop(self):\n raise NotImplementedError",
"def stop(self):\n raise NotImplementedError",
"def stop(self):\n\t\tpass",
"def stop(self):\n self._stop_signal = True",
"def stop(self):\r\n with self._lock:\r\n self._stopped = True\r\n self.join()",
"def stop(self):\n return",
"def stop(self):\n self._context.state = STOPPED",
"def stop(self):\n self.running = False\n self.cam.stop()\n self.amplifier.stop()\n pass",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n self._run_flag = False\n self.wait()",
"def stop(self):\n if self._running:\n self._running = False\n self._call.stop()",
"def stop_run(arn=None):\n pass",
"def stop(self):\n self.send_stop()\n self.join()",
"def stop(self):\n self.scion_sh('stop')",
"def stop(self):\n self.shutdown = True\n self.rotary_encoder.stop()",
"def stop_running(self):\n self.running = False",
"def stop(self):\n return _uhd_swig.usrp_sink_sptr_stop(self)",
"def stop(self):\r\n self.terminating = True",
"async def stop_rtsp_livestream(self):\n await self.api.stop_rtsp_livestream(self.product_type, self.serial_no)",
"def stop(self) :\n raise NotImplementedError(\"stop not implemented\")",
"def _stop(self):\n self._pi.stop()",
"def stop(self):\n return self.Stop(self._handle)",
"def stop(self):\n if self.current_consumer:\n self.current_consumer.close()\n if self.p:\n self.stream.stop_stream()\n self.stream.close()\n self.p.terminate()\n self.stream = None\n self.p = None\n\n logging.info(\"Stopping SleeptalkPoller, stop event set\")",
"def stop(self):\r\n self.stopped = True\r\n time.sleep(1)",
"def stop(self):\r\n self._stop.set()",
"def stop(self) -> None:\n raise NotImplementedError()",
"def stop(self) -> None:\n raise NotImplementedError()",
"def stop(self):\n self._Thread__stop()",
"def stop(self):\n # indicate that the thread should be stopped\n self.stopped = True\n # wait until stream resources are released (producer thread might be still grabbing frame)\n self.thread.join()",
"def stop(self):\n with self.stop_lock:\n self._stop_event.set()",
"def stop(self):\n self.stopped = True",
"def stop(self):\n self._alive = False",
"def stop(self):\n self._stop = True\n self.wake_up()",
"def stop(self):\n self.prestop()\n if not self._graceful:\n self._graceful = True\n self.stream.stop_stream()\n self.audio.terminate()\n msg = 'Stopped'\n self.verbose_info(msg, log=False)\n # Log 'Stopped' anyway\n if self.log:\n self.logging.info(msg)\n if self.collect:\n if self._data:\n print('Collected result:')\n print(' min: %10d' % self._data['min'])\n print(' max: %10d' % self._data['max'])\n print(' avg: %10d' % int(self._data['avg']))\n self.poststop()"
] |
[
"0.8050214",
"0.80189526",
"0.7999657",
"0.7922578",
"0.7622994",
"0.7271031",
"0.7232619",
"0.71897453",
"0.71808064",
"0.7150011",
"0.71426904",
"0.7123477",
"0.7123477",
"0.7122764",
"0.7122764",
"0.71187675",
"0.7106935",
"0.7099712",
"0.7099712",
"0.7099712",
"0.7099712",
"0.7099712",
"0.70795554",
"0.70795554",
"0.7060017",
"0.7060017",
"0.7060017",
"0.7060017",
"0.70346",
"0.70346",
"0.7025541",
"0.7024756",
"0.70217663",
"0.70217663",
"0.6998262",
"0.69887465",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.6986094",
"0.698421",
"0.698421",
"0.6967654",
"0.6961284",
"0.69382083",
"0.69338596",
"0.69317585",
"0.6908346",
"0.69007856",
"0.6882876",
"0.68820053",
"0.6870988",
"0.685919",
"0.685919",
"0.6854933",
"0.6849801",
"0.6849801",
"0.68449765",
"0.68376505",
"0.68322504",
"0.68321925",
"0.68283564",
"0.6820712",
"0.68192077",
"0.68192077",
"0.68192077",
"0.68192077",
"0.68192077",
"0.68192077",
"0.6812534",
"0.68096113",
"0.68053865",
"0.67854434",
"0.67718035",
"0.6746297",
"0.6741003",
"0.6740211",
"0.6738789",
"0.67387474",
"0.67307436",
"0.67294705",
"0.6729354",
"0.67287344",
"0.67257464",
"0.6723562",
"0.6723562",
"0.67216915",
"0.6709379",
"0.67083824",
"0.6701896",
"0.66958106",
"0.6693832",
"0.6693428"
] |
0.83448356
|
0
|
Returns class by interpreting input string as module path and class name. Module path should be separated by dots as usual. Separate class name from module by '/'.
|
def get_class(string):
logger = logman.getLogger(__name__)
if '/' not in string:
logger.error("The string is not properly formatted. Use '/' to separate module path from classname. String is: {}".format(string))
return
module_name, class_name = string.split('/')
try:
logger.debug('Retrieving class {} from module {}'.format(class_name, module_name))
temp_class = getattr(importlib.import_module(module_name), class_name)
except ModuleNotFoundError:
logger.error("Module not found: {}".format(module_name))
raise
except AttributeError:
logger.error("Class not found: {}".format(class_name))
raise
except:
logger.error("Unexpected error while loading {}".format(string))
raise
return temp_class
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_class(full_class_string):\r\n class_data = full_class_string.split(\".\")\r\n module_path = \".\".join(class_data[:-1])\r\n class_str = class_data[-1]\r\n module = importlib.import_module(module_path)\r\n return getattr(module, class_str)",
"def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_",
"def stringToClass(cls_str):\n import_stg1 = cls_str.split(\" \")[1]\n import_stg2 = import_stg1.replace(\"'\", \"\")\n import_stg3 = import_stg2.replace(\">\", \"\")\n import_parse = import_stg3.split(\".\")\n cls = import_parse[-1]\n import_path = '.'.join(import_parse[:-1])\n import_statement = \"from %s import %s\" % (import_path, cls)\n exec(import_statement)\n assign_statement = \"this_class = %s\" % cls\n exec(assign_statement)\n return this_class",
"def load_class(full_class_string):\n\n class_data = full_class_string.split(\".\")\n module_path = \".\".join(class_data[:-1])\n class_str = class_data[-1]\n\n module = importlib.import_module(module_path)\n # Finally, we retrieve the Class\n return getattr(module, class_str)",
"def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)",
"def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ValueError, AttributeError):\n raise ImportError('Class %s cannot be found (%s).' %\n (class_str,\n traceback.format_exception(*sys.exc_info())))",
"def load_class(full_class_string):\n # todo: cache classes (if this is an overhead)\n class_data = full_class_string.split(\".\")\n module_path = \".\".join(class_data[:-1])\n class_str = class_data[-1]\n module = importlib.import_module(module_path)\n # Finally, we retrieve the Class\n return getattr(module, class_str)",
"def import_class(path):\n components = path.split(\".\")\n module = components[:-1]\n module = \".\".join(module)\n # __import__ needs a native str() on py2\n mod = __import__(module, fromlist=[str(components[-1])])\n return getattr(mod, str(components[-1]))",
"def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n try:\r\n __import__(mod_str)\r\n return getattr(sys.modules[mod_str], class_str)\r\n except (ValueError, AttributeError):\r\n raise ImportError(_('Class %s cannot be found (%s)') %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))",
"def import_class(import_str):\n mod_str, _sep, class_str = import_str.rpartition('.')\n try:\n __import__(mod_str)\n return getattr(sys.modules[mod_str], class_str)\n except (ImportError, ValueError, AttributeError), exc:\n logging.debug('Inner Exception: %s', exc)\n raise",
"def import_class(import_str):\r\n mod_str, _sep, class_str = import_str.rpartition('.')\r\n __import__(mod_str)\r\n try:\r\n return getattr(sys.modules[mod_str], class_str)\r\n except AttributeError:\r\n raise ImportError('Class %s cannot be found (%s)' %\r\n (class_str,\r\n traceback.format_exception(*sys.exc_info())))",
"def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)",
"def importClass(importStr):\n moduleStr, _sep, classStr = importStr.rpartition(\".\")\n \n try:\n __import__(moduleStr)\n return getattr(sys.modules[moduleStr], classStr)\n except (ValueError, AttributeError):\n raise ImportError(\"Class %s cannot be found (%s)\" %\n (classStr, traceback.format_exception(*sys.exc_info())))",
"def load_class(\n fully_qualified_class_name: str\n):\n\n (module_name, fully_qualified_class_name) = fully_qualified_class_name.rsplit('.', 1)\n module_ref = importlib.import_module(module_name)\n class_ref = getattr(module_ref, fully_qualified_class_name)\n\n return class_ref",
"def create_class_from_strings( self, module_name, class_name):\r\n if not( self.logger is None ):\r\n self.logger.debug( \"create class {module_name} {class_name}\" )\r\n\r\n# print( \"create class \" + module_name + \" \" + class_name )\r\n\r\n a_class = getattr( importlib.import_module(module_name), class_name )\r\n instance = a_class( )\r\n return instance",
"def classFromString(className, mod=None):\n if mod is None:\n mod = className\n if className == \"NoneType\":\n cls = None\n else:\n try:\n __import__(mod, globals(), locals(), [], -1)\n cls = sys.modules[mod].__dict__[className]\n except ImportError:\n try:\n cls = eval(\"{0}\".format(className))\n except NameError:\n print('Class \"{0}\" from modue \"{1}\"'\n ' was not found.'.format(className, mod))\n return\n except:\n print('An unanticipated error occurred '\n 'while trying to find Class \"{0}\"'\n ' in module \"{1}\".'.format(className, mod))\n raise\n except:\n print('Module \"{0}\" was not found, terminating'.format(mod))\n raise\n return cls",
"def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)",
"def get_class_from_string(self, classname, module):\n\n myclass = None\n try:\n # Meta language for dinamically import\n myclass = getattr(module, classname)\n except AttributeError as e:\n logger.critical(\"Failed to load resource: \" + str(e))\n\n return myclass",
"def get_class(class_name, module_paths=None):\n class_ = locate(class_name)\n if (class_ is None) and (module_paths is not None):\n for module_path in module_paths:\n class_ = locate('.'.join([module_path, class_name]))\n if class_ is not None:\n break\n\n if class_ is None:\n raise ValueError(\n \"Class not found in {}: {}\".format(module_paths, class_name))\n\n return class_",
"def process_path(module_path):\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module",
"def process_path(module_path):\n\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n if module_path == 'StorageDict':\n return 'StorageDict', 'hecuba.hdict'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module",
"def get_class(classname):\n parts = classname.split('.')\n module = '.'.join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp) \n return m",
"def load_class(path):\r\n\r\n mod_name, klass_name = path.rsplit('.', 1)\r\n\r\n try:\r\n mod = import_module(mod_name)\r\n except AttributeError as e:\r\n raise ImproperlyConfigured('Error importing {0}: \"{1}\"'.format(mod_name, e))\r\n\r\n try:\r\n klass = getattr(mod, klass_name)\r\n except AttributeError:\r\n raise ImproperlyConfigured('Module \"{0}\" does not define a \"{1}\" class'.format(mod_name, klass_name))\r\n\r\n return klass",
"def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)) from err",
"def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n except ValueError as err:\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\n\n module = importlib.import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError as err:\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n ) from err",
"def construct_class_by_name(name, *args, **kwargs):\n parts = name.split('.')\n module_name, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_name)\n return getattr(module, class_name)(*args, **kwargs)",
"def import_class(classpath):\n modname, classname = classpath.rsplit(\".\", 1)\n module = importlib.import_module(modname)\n klass = getattr(module, classname)\n return klass",
"def instantiate_from_string(class_name):\n class_name = convert_underscore_to_camel_case(class_name)\n return globals()[class_name]()",
"def import_string(dotted_path):\n try:\n module_path, class_name = dotted_path.rsplit('.', 1)\n module = import_module(module_path)\n\n try:\n return getattr(module, class_name)\n except AttributeError:\n msg = 'Module \"%s\" does not define a \"%s\" attribute/class' % (\n module_path, class_name)\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])\n\n except ValueError:\n msg = \"%s doesn't look like a module path\" % dotted_path\n six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])",
"def load_class(class_path, setting_name=None):\n try:\n class_module, class_name = class_path.rsplit('.', 1)\n except ValueError:\n if setting_name:\n txt = '%s isn\\'t a valid module. Check your %s setting' % (class_path,setting_name)\n else:\n txt = '%s isn\\'t a valid module.' % class_path\n raise exceptions.ImproperlyConfigured(txt)\n \n try:\n mod = import_module(class_module)\n except ImportError, e:\n if setting_name:\n txt = 'Error importing backend %s: \"%s\". Check your %s setting' % (class_module, e, setting_name)\n else:\n txt = 'Error importing backend %s: \"%s\".' % (class_module, e)\n raise exceptions.ImproperlyConfigured(txt)\n \n try:\n clazz = getattr(mod, class_name)\n except AttributeError:\n if setting_name:\n txt = 'Backend module \"%s\" does not define a \"%s\" class. Check your %s setting' % (class_module, class_name, setting_name)\n else:\n txt = 'Backend module \"%s\" does not define a \"%s\" class.' % (class_module, class_name)\n raise exceptions.ImproperlyConfigured(txt)\n return clazz",
"def load_class(specifier):\n module_names, class_names = [], []\n components = specifier.split('.')\n\n for component in components:\n if is_module_name(component):\n module_names.append(component)\n elif is_class_name(component):\n class_names.append(component)\n else:\n raise ValueError(f'Invalid class specifier component: {component}')\n\n if not module_names:\n raise ValueError(f'Class specifier missing module: {specifier}')\n if not class_names:\n raise ValueError(f'Class specifier missing class: {specifier}')\n\n module_path = '.'.join(module_names)\n first_class = class_names[0]\n\n importlib.invalidate_caches()\n module = importlib.import_module(module_path)\n\n cls = module\n for class_name in class_names:\n try:\n cls = getattr(cls, class_name)\n except AttributeError:\n raise ValueError(f'Class not found: {class_name}')\n\n return cls",
"def load_class(full_class_name):\n\n last_dot = full_class_name.rfind('.')\n\n if last_dot == -1:\n message = (\"We require at least two dot-separated components in the \"\n \"class-name [%s].\" % (full_class_name))\n\n logging.exception(message)\n raise Exception(message)\n \n module_name = full_class_name[:last_dot]\n class_name = full_class_name[last_dot + 1:]\n\n logging.debug(\"Loading class [%s] from module [%s].\" % (class_name, \n module_name))\n\n try:\n module = importlib.import_module(module_name)\n except:\n logging.exception(\"Could not import module [%s].\" % (module_name))\n raise\n\n try:\n return module.__dict__[class_name]\n except:\n logging.exception(\"Class [%s] does not exist in module [%s].\" % \n (class_name, module_name))\n raise",
"def dotname2cls(dotname):\n modname, clsname = dotname.rsplit('.', 1)\n return getattr(importlib.import_module(modname), clsname)",
"def class_from_class_path(class_path):\n if class_path not in _CLASS_PATH_TO_CLASS_CACHE:\n module_name, class_name = class_path.rsplit('.', 1)\n m = importlib.import_module(module_name)\n c = getattr(m, class_name)\n _CLASS_PATH_TO_CLASS_CACHE[class_path] = c\n\n return _CLASS_PATH_TO_CLASS_CACHE[class_path]",
"def import_class_from_module_path(path, class_name):\n try:\n module_ = SourceFileLoader('', path).load_module()\n return getattr(module_, class_name)\n except FileNotFoundError:\n raise FileNotFoundError(\"%s not found\" % path)\n except AttributeError:\n raise AttributeError(\"%s class not found in %s\" % (class_name, path))",
"def get_class(kls):\n parts = kls.split('.')\n module = \".\".join(parts[:-1])\n m = __import__( module )\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m",
"def loadModule(module_name, class_name = None):\n mod = importlib.import_module(module_name)\n if class_name == None: return mod\n else: return getattr(mod, class_name)",
"def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)",
"def path_to_class_name(path):\n # type: (unicode) -> unicode\n character_map = {\n ord(\"{\"): None,\n ord(\"}\"): None,\n ord(\"_\"): u\"/\"\n }\n sanitised = path.translate(character_map)\n class_name = u\"\".join(\n # Uppercase the first letter of each non-empty word, while\n # preserving the case of the letters thereafter.\n p[0].upper() + p[1:] for p in sanitised.split(\"/\") if p\n )\n return class_name or ROOT_CLASS_NAME",
"def getPackageModuleClassStr( inClassObj ):\n tempStr= getPythonObjectStrInQuotes( inClassObj )\n tempList= tempStr.split(\".\")\n\n classStrTuple = collections.namedtuple('classStrTuple', 'fullname packageName moduleName className')\n\n return classStrTuple( fullname= tempStr, packageName= tempList[0], moduleName= tempList[1], className= tempList[2] )",
"def import_class(import_path, setting_name=None):\n mod_name, class_name = import_path.rsplit('.', 1)\n\n # import module\n mod = _import_module(mod_name, classnames=(class_name,))\n if mod is not None:\n # Loaded module, get attribute\n try:\n return getattr(mod, class_name)\n except AttributeError:\n pass\n\n # For ImportError and AttributeError, raise the same exception.\n if setting_name:\n raise ImproperlyConfigured(\"{0} does not point to an existing class: {1}\".format(setting_name, import_path))\n else:\n raise ImproperlyConfigured(\"Class not found: {0}\".format(import_path))",
"def get_cls(module_name, class_name, relaxed=True):\n try:\n module = importlib.import_module(module_name)\n except ImportError:\n if relaxed:\n return None\n else:\n raise ImportError(\"Cannot load module: %s\" % module_name)\n try:\n return getattr(module, class_name)\n except AttributeError:\n if relaxed:\n return None\n else:\n raise NotImplementedError(\"Cannot load class: %s.%s\" % (module_name, class_name))",
"def _parse_class_from_resource(self, resource):\n parts = resource.split('/')\n if not parts[-1].rstrip():\n del parts[-1]\n\n cls = parts[-2]\n cls = cls[0].upper() + cls[1:].lower()\n return cls",
"def get_full_path_to_class(cls: Type) -> str:\n mod = cls.__module__\n name = cls.__name__\n\n return f\"{mod}.{name}\"",
"def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c",
"def get_class_import_name(name):\n name = _strip_class_name(name)\n return name",
"def get_submodule_and_class(path, root_dir):\n relative_path = path[len(root_dir):]\n parts = relative_path.split(os.sep)\n project = parts[0]\n clazz = parts[-1][:-5] # Remove the .java extension\n\n return (project, clazz)",
"def get_python_classname(raw_classname):\n class_name = raw_classname.replace(\" \",\"\")\n class_name = class_name.replace(\"-\",\"\")\n return class_name",
"def import_class(self, class_name):\n internal_class_name = class_name.split(\".\")[-1][:-2]\n class_path = class_name.split()[-1].split(\".\")[:-1]\n class_path[0] = class_path[0][1:]\n class_module_path = \".\".join(class_path)\n if internal_class_name in self._project.job_type.job_class_dict:\n module_path = self._project.job_type.job_class_dict[internal_class_name]\n if class_module_path != module_path:\n state.logger.info(\n f'Using registered module \"{module_path}\" instead of custom/old module \"{class_module_path}\" to'\n f' import job type \"{internal_class_name}\"!'\n )\n else:\n module_path = class_module_path\n return getattr(\n importlib.import_module(module_path),\n internal_class_name,\n )",
"def import_from_string(import_path: str) -> Any:\n\n import_classname = import_path.split(\".\")[-1]\n import_module = \".\".join(import_path.split(\".\")[:-1])\n\n module = importlib.import_module(import_module)\n return getattr(module, import_classname)",
"def importClass(class_name, module_name, module_path):\n spec = importlib.util.spec_from_file_location(\n module_name, module_path, submodule_search_locations=[])\n module = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = module\n spec.loader.exec_module(module)\n importlib.invalidate_caches()\n single_class = getattr(module, class_name)\n return single_class",
"def require(path,className=None):\n (dirname, basename) = os.path.split(path)\n packageName = dirname.replace('/','.')\n moduleName = basename.rstrip('.py')\n\n logging.getLogger().debug(\"Loading: %s.%s[%s]\" %(packageName,moduleName,className))\n\n mod = __import__(packageName+'.'+moduleName, globals(), locals(), [className])\n if className:\n return getattr(mod, className)\n\n return mod",
"def get_class(cls):\n return '{}.{}'.format(cls.__module__, cls.__name__)",
"def resolve(name, package=None):\n if isinstance(package, str):\n package = resolve_exposing(package)\n\n if package:\n name = resolve_name('.{}'.format(name), package.__name__)\n\n try:\n # Try to get a module\n return resolve_exposing(name)\n except ImportError as err:\n if '.' not in name:\n raise NotFoundError('{n} is not a valid module name'.format(n=name)) from err\n\n try:\n # Try to get an attribute of a module\n mod, attr = name.rsplit('.', maxsplit=1)\n package = resolve_exposing(mod)\n cls = getattr(package, attr)\n assert(isinstance(cls, type))\n return cls\n except ImportError as err:\n raise NotFoundError('{n} is not a valid class or module name'.format(n=name)) from err\n except AttributeError as err:\n raise NotFoundError('{a} does not exist within {m}'.format(a=attr, m=mod)) from err\n except AssertionError as err:\n raise ResolveError('{a} in {m} is not a valid class'.format(a=attr, m=mod)) from err",
"def find_class_by_name(name, modules):\n modules = [getattr(module, name, None) for module in modules]\n return next(a for a in modules if a)",
"def _modname(cls, full=False):\n module = getattr(cls, '__module__', None)\n if module is None or module == str.__class__.__module__:\n return cls.__name__\n if full and module == \"__main__\":\n import inspect\n the_module = inspect.getmodule(cls)\n spec = getattr(the_module, '__spec__', None)\n if spec is None:\n if the_module.__name__ == '__main__':\n module = '.'.join([the_module.__package__,\n os.path.basename(the_module.__file__.split('.')[0])])\n else:\n module = getattr(the_module, '__package__', None)\n else:\n module = spec.name if spec else module\n return module\n return module + '.' + cls.__class__.__name__",
"def get_cls_name(obj: Any, package_name: bool = True) -> str:\n cls_name = str(obj.__class__)\n # remove class prefix\n cls_name = cls_name.split('\\'')[1]\n # split modules\n cls_split = cls_name.split('.')\n if len(cls_split) > 1:\n cls_name = cls_split[0] + '.' + cls_split[-1] if package_name else cls_split[-1]\n else:\n cls_name = cls_split[0]\n return cls_name",
"def fullname(cls):\n module = cls.__module__\n if module is None or module == str.__class__.__module__:\n return cls.__class__.__name__\n return module + '.' + cls.__class__.__name__",
"def __init__(self, module_name,class_name):\n\n try:\n self.module = importlib.import_module(module_name)\n self.get_class_object = getattr(self.module,class_name)\n \n except:\n print(\"Failed to import the module {} from {}\".format(class_name,module_name))",
"def _classname2instance(\n classname: str,\n arguments: dict,\n namespace: list,\n):\n # find the corresponding operation\n basename = None\n for name in namespace:\n if classname in _get_all_classnames(name):\n basename = name\n break\n assert basename != None, f'cannot found {classname} in {namespace}'\n # return the corresponding module\n return eval(f'basename.{classname}(**arguments)')",
"def import_from_str(value):\n try:\n parts = value.split('.')\n module_path, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_path, package=__name__.rsplit('.', 1)[0])\n return getattr(module, class_name)\n except ImportError as e:\n msg = 'Could not import %s from %s for settings. %s: %s.' % (value, __name__, e.__class__.__name__, e)\n raise ImportError(msg)",
"def module_path_to_class_path(paths: List[str]) -> List[str]:\n class_paths = list()\n for path in paths:\n # loop all paths\n try:\n # Duck test\n # Given path can be imported by using `import_module`, it's a\n # module path.\n module = import_module(path)\n for member in dir(module):\n # check members\n klass = getattr(module, member)\n if type(klass) == type:\n # Collect class path if the member is a class.\n class_path = klass.__module__ + '.' + klass.__name__\n class_paths.append(class_path)\n\n except ModuleNotFoundError:\n # Oops, the path isn't a module path.\n # That may be a class path.\n class_paths.append(path)\n\n return class_paths",
"def cls2dotname(cls):\n return '%s.%s' % (cls.__module__, cls.__name__)",
"def resolve_name(name):\n parts = name.split('.')\n cursor = len(parts)\n module_name, rest = parts[:cursor], parts[cursor:]\n\n while cursor > 0:\n try:\n ret = __import__('.'.join(module_name))\n break\n except ImportError:\n if cursor == 0:\n raise\n cursor -= 1\n module_name = parts[:cursor]\n rest = parts[cursor:]\n ret = ''\n\n for part in parts[1:]:\n try:\n ret = getattr(ret, part)\n except AttributeError:\n raise ImportError\n\n return ret",
"def import_function(s):\n a = s.split('.')\n j = lambda x: '.'.join(x)\n return getattr(import_module(j(a[:-1])), a[-1])",
"def get_module_from_string(self, modulestring):\n\n module = None\n try:\n # Meta language for dinamically import\n module = import_module(modulestring)\n except ImportError as e:\n logger.critical(\"Failed to load resource: \" + str(e))\n return module",
"def resolve_class(classref):\n if classref is None:\n return None\n elif isinstance(classref, six.class_types):\n return classref\n elif isinstance(classref, six.string_types):\n return import_class(classref)\n else:\n raise ValueError(\"Unable to resolve class for '%s'\" % classref)",
"def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)",
"def get_module(module_name):\n module = __import__(module_name)\n components = module_name.split('.')\n for comp in components[1:]:\n module = getattr(module,comp)\n return module",
"def get_module_class(module):\n try:\n for name, obj in inspect.getmembers(module):\n # must check for parent module name (should be beacon/codec/etc) as to avoid imported class objects\n if inspect.isclass(obj) and obj.__module__ == module.__name__:\n return obj\n # have it instantiate the object? depends where I decide to use this method: obj_() creates an instance.\n except Exception, e:\n print \"Error getting class from %s module\" % (module.__name__)\n raise",
"def dotted_path(cls):\n return f\"{cls.__module__}.{cls.__qualname__}\"",
"def builder(string):\n return getattr(sys.modules[__name__], string)",
"def create_class(self):\n temp_class = self.temp('separated.class')\n return temp_class.format(**self.__dict__)",
"def get_class(self, name):\n return self.host.get_class(name)",
"def get_module(self):\n module = self.__class__.__module__.split('.')\n module = \".\".join(module[:-1])\n module = module + \".\" + self._get_valid_version().module\n return module",
"def class_name(name: str) -> str:\n return text.pascal_case(utils.safe_snake(name, \"type\"))",
"def get_class(name, base_class, *modules):\n for cls in iter_classes(base_class, *modules, class_filter=lambda x: x.__module__.split('.')[-1] == name):\n return cls\n return None",
"def dynamic_import_from(source_file: str, class_name: str) -> Any:\n module = importlib.import_module(source_file)\n return getattr(module, class_name)",
"def get_backend_by_name(cls_str):\n # type: (str) -> Backend\n try:\n return globals()[cls_str]()\n except KeyError:\n raise InvalidBackendClass('Invalid backend class name: {cls}'.format(cls=cls_str))",
"def _get_module(dotted_path):\n package, module = dotted_path.rsplit('.', 1)\n return getattr(import_module(package), module)",
"def import_object(import_str, *args, **kw):\n try:\n __import__(import_str)\n return sys.modules[import_str]\n except ImportError:\n cls = import_class(import_str)\n return cls(*args, **kw)",
"def get_factory(self, class_name):\n if class_name in self._class_name_class_dict:\n return self._class_name_class_dict[class_name]()\n else:\n raise ModuleNotFoundError(\"Module should be in {}\".format(self.factory_names))",
"def guess_class(self, path, language):\n problem, ext = os.path.splitext(os.path.basename(path))\n mainclass = problem if language in _GUESS_MAINCLASS else None\n return mainclass",
"def get_class(mod, class_name: str):\n for name_val in inspect.getmembers(mod, inspect.isclass):\n name = name_val[0]\n val = name_val[1]\n if name == class_name:\n return val\n return None",
"def get_class_name_from_pkg_name(opts):\n pkg_name = opts[\"package\"]\n return \"\".join(map(str.capitalize, pkg_name.split(\"_\")))",
"def get_class_from_file(_file, saltclass_path):\n # remove classes path prefix\n _file = _file[len(os.path.join(saltclass_path, \"classes\")) + len(os.sep) :]\n # remove .yml extension\n _file = _file[:-4]\n # revert to dotted notation\n _file = _file.replace(os.sep, \".\")\n # remove tailing init\n if _file.endswith(\".init\"):\n _file = _file[:-5]\n return _file",
"def getFilterClass(filterName, pkg=\"ufo2ft.filters\"):\n # TODO add support for third-party plugin discovery?\n # if filter name is 'Foo Bar', the module should be called 'fooBar'\n filterName = filterName.replace(\" \", \"\")\n moduleName = filterName[0].lower() + filterName[1:]\n module = importlib.import_module(\".\".join([pkg, moduleName]))\n # if filter name is 'Foo Bar', the class should be called 'FooBarFilter'\n className = filterName[0].upper() + filterName[1:]\n if not className.endswith(\"Filter\"):\n className += \"Filter\"\n return getattr(module, className)",
"def fixType(typeStr):\n pos = typeStr.rfind('.')\n if pos != -1:\n typeStr = typeStr[pos+1:]\n return classMap.get(typeStr, typeStr)",
"def _strip_class_name(name):\n name = _strip(name)\n if name.find('.') != -1:\n name = name.split('.')[len(name.split('.')) - 1]\n return name",
"def get_class_file_name(name):\n name = _strip_class_name(name)\n return name + FILE_EXTENSION",
"def safe_isinstance(obj, class_path_str):\n # this function is copy-paste from the code of the SHAP Python library\n # Copyright (c) 2018 Scott Lundberg\n if isinstance(class_path_str, str):\n class_path_strs = [class_path_str]\n elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):\n class_path_strs = class_path_str\n else:\n class_path_strs = ['']\n\n # try each module path in order\n for class_path_str in class_path_strs:\n if \".\" not in class_path_str:\n raise ValueError(\"class_path_str must be a string or list of strings specifying a full \\\n module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'\")\n\n # Splits on last occurence of \".\"\n module_name, class_name = class_path_str.rsplit(\".\", 1)\n\n # here we don't check further if the model is not imported, since we shouldn't have\n # an object of that types passed to us if the model the type is from has never been\n # imported. (and we don't want to import lots of new modules for no reason)\n if module_name not in sys.modules:\n continue\n\n module = sys.modules[module_name]\n\n #Get class\n _class = getattr(module, class_name, None)\n\n if _class is None:\n continue\n\n if isinstance(obj, _class):\n return True\n\n return False",
"def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]",
"def path_to_module(path: str):\n if path[-3:] != '.py':\n raise ValueError(\"Not a python module path \", path)\n path_minus_py = path[:-3]\n path_w_dots = path_minus_py.replace(\"/\", \".\")\n # strip leading docs\n if path_w_dots[0] == '.':\n path_w_dots = path_w_dots[1:]\n # if there was an __init__ then strip that\n if path_w_dots[-9:] == \".__init__\":\n path_w_dots = path_w_dots[:-9]\n return path_w_dots",
"def _import_string(import_name):\n if \".\" in import_name:\n module, obj = import_name.rsplit(\".\", 1)\n else:\n return importlib.import_module(import_name)\n return getattr(importlib.import_module(module), obj)",
"def import_classifier(name):\n classinput=open(name,'rb')\n main_class=load(classinput)\n classinput.close()\n return main_class",
"def easy_import(pkg_name, module_name):\n try:\n pkg = __import__(pkg_name, fromlist=[module_name])\n except ImportError ,e:\n print \"Erorr importing %s from %s\" % (module_name, pkg_name)\n raise\n module = getattr(pkg,module_name)\n return Controller.get_module_class(module)",
"def resolve_object(object_string):\n (module_name, object_name) = object_string.rsplit(\".\", 1)\n \n try:\n module = import_module(module_name)\n except ImportError, exc:\n raise ValueError(\"Could not import module %s: %s\" % (module_name, exc))\n \n if not hasattr(module, object_name):\n raise ValueError(\"Module %s does not have object %s\" %\n (module_name, object_name))\n \n return getattr(module, object_name)",
"def string_to_class(names):\n return [eval(name) for name in names]",
"def import_object(name):\n parts = name.split('.')\n obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)\n return getattr(obj, parts[-1])",
"def find_class(self, class_name: str) -> Type:\n pass"
] |
[
"0.7585165",
"0.7544114",
"0.7541174",
"0.7536624",
"0.75242513",
"0.74492747",
"0.7387409",
"0.7375805",
"0.73551124",
"0.7353887",
"0.7324937",
"0.72359395",
"0.72320575",
"0.72180814",
"0.71838266",
"0.71719867",
"0.7155751",
"0.7124456",
"0.711404",
"0.7071752",
"0.7059211",
"0.6968013",
"0.69621795",
"0.68345225",
"0.6799715",
"0.6759073",
"0.67556685",
"0.6755289",
"0.67147267",
"0.6701341",
"0.667013",
"0.6660378",
"0.66405517",
"0.65854245",
"0.65473926",
"0.6468743",
"0.64559746",
"0.6422263",
"0.642023",
"0.6344775",
"0.62286747",
"0.6218633",
"0.61928606",
"0.6179392",
"0.6163509",
"0.6088778",
"0.60642076",
"0.60074395",
"0.60057765",
"0.59606624",
"0.5960564",
"0.5940086",
"0.5938277",
"0.59205204",
"0.5917979",
"0.58904356",
"0.5873231",
"0.58511454",
"0.58466715",
"0.5846147",
"0.58378756",
"0.5797025",
"0.5780425",
"0.57227784",
"0.57028055",
"0.5702238",
"0.5695357",
"0.5692326",
"0.5671205",
"0.565731",
"0.56441236",
"0.56388646",
"0.5633079",
"0.56304085",
"0.5626003",
"0.5623146",
"0.5618173",
"0.56159925",
"0.5612455",
"0.55960757",
"0.5595293",
"0.5594373",
"0.5594326",
"0.55795205",
"0.55787563",
"0.55705273",
"0.5550061",
"0.55458814",
"0.5543323",
"0.5543175",
"0.55397755",
"0.5539536",
"0.5515102",
"0.5514305",
"0.5492537",
"0.54915863",
"0.5480245",
"0.54622006",
"0.5455602",
"0.54509515"
] |
0.87269014
|
0
|